Skip to content

Interfaces#

toop_engine_interfaces #

toop_engine_interfaces.folder_structure #

Defines constants for the folder structure.

File: folder_structure.py Author: Benjamin Petrick Created: 2024-09-12

PREPROCESSING_PATHS module-attribute #

# Maps logical artifact names to relative paths for everything the
# preprocessing stage reads or writes. Paths are relative to a run
# directory; nested entries (e.g. "initial_topology/...") live under
# the directories declared by the other keys.
PREPROCESSING_PATHS = {
    "grid_file_path_powsybl": "grid.xiidm",
    "grid_file_path_pandapower": "grid.json",
    "network_data_file_path": "network_data.pkl",
    "masks_path": "masks",
    "static_information_file_path": "static_information.hdf5",
    "importer_auxiliary_file_path": "importer_auxiliary_data.json",
    "initial_topology_path": "initial_topology",
    "LF_CA_path": "initial_topology/LF_CA",
    "single_line_diagram_path": "initial_topology/single_line_diagram",
    "asset_topology_file_path": "initial_topology/asset_topology.json",
    "original_gridfile_path": "initial_topology/original_gridfile",
    "logs_path": "logs",
    "start_datetime_info_file_path": "logs/start_datetime.info",
    "chronics_path": "chronics",
    "action_set_file_path": "action_set.json",
    "nminus1_definition_file_path": "nminus1_definition.json",
    "ignore_file_path": "ignore_elements.csv",
    "contingency_list_file_path": "contingency_list.csv",
    "static_information_stats_file_path": "static_information_stats.json",
}

POSTPROCESSING_PATHS module-attribute #

# Maps logical names to relative paths for postprocessing outputs.
# Snapshots are grouped per solver mode (dc / ac / dc_plus) under
# "optimizer_snapshots".
POSTPROCESSING_PATHS = {
    "optimizer_snapshots_path": "optimizer_snapshots",
    "dc_optimizer_snapshots_path": "optimizer_snapshots/dc",
    "ac_optimizer_snapshots_path": "optimizer_snapshots/ac",
    "dc_plus_optimizer_snapshots_path": "optimizer_snapshots/dc_plus",
    "LF_CA_ac_path": "optimizer_snapshots/ac/LF_CA",
    "single_line_diagram_ac_path": "optimizer_snapshots/ac/single_line_diagram",
    "logs_path": "logs",
}

NETWORK_MASK_NAMES module-attribute #

# Filenames (.npy) for the per-element-type network masks, keyed by mask name.
# Presumably stored under the "masks_path" directory of PREPROCESSING_PATHS —
# TODO confirm against the writer/reader code.
NETWORK_MASK_NAMES = {
    "relevant_subs": "relevant_subs.npy",
    "line_for_nminus1": "line_for_nminus1.npy",
    "line_for_reward": "line_for_reward.npy",
    "line_overload_weight": "line_overload_weight.npy",
    "line_disconnectable": "line_disconnectable.npy",
    "line_tso_border": "line_tso_border.npy",
    "line_blacklisted": "line_blacklisted.npy",
    "trafo_for_nminus1": "trafo_for_nminus1.npy",
    "trafo_for_reward": "trafo_for_reward.npy",
    "trafo_overload_weight": "trafo_overload_weight.npy",
    "trafo_disconnectable": "trafo_disconnectable.npy",
    "trafo_dso_border": "trafo_dso_border.npy",
    "trafo_n0_n1_max_diff_factor": "trafo_n0_n1_max_diff_factor.npy",
    "trafo_blacklisted": "trafo_blacklisted.npy",
    "trafo_pst_controllable": "trafo_pst_controllable.npy",
    "trafo3w_for_nminus1": "trafo3w_for_nminus1.npy",
    "trafo3w_for_reward": "trafo3w_for_reward.npy",
    "trafo3w_overload_weight": "trafo3w_overload_weight.npy",
    "trafo3w_disconnectable": "trafo3w_disconnectable.npy",
    "trafo3w_n0_n1_max_diff_factor": "trafo3w_n0_n1_max_diff_factor.npy",
    "tie_line_for_reward": "tie_line_for_reward.npy",
    "tie_line_for_nminus1": "tie_line_for_nminus1.npy",
    "tie_line_overload_weight": "tie_line_overload_weight.npy",
    "tie_line_disconnectable": "tie_line_disconnectable.npy",
    "tie_line_tso_border": "tie_line_tso_border.npy",
    "dangling_line_for_nminus1": "dangling_line_for_nminus1.npy",
    "generator_for_nminus1": "generator_for_nminus1.npy",
    "load_for_nminus1": "load_for_nminus1.npy",
    "switch_for_nminus1": "switch_for_nminus1.npy",
    "switch_for_reward": "switch_for_reward.npy",
    "cross_coupler_limits": "cross_coupler_limits.npy",
    "sgen_for_nminus1": "sgen_for_nminus1.npy",
    "busbar_for_nminus1": "busbar_for_nminus1.npy",
}

OUTPUT_FILE_NAMES module-attribute #

# Filenames for result artifacts (topologies and loadflow result files),
# keyed by logical output name.
OUTPUT_FILE_NAMES = {
    "multiple_topologies": "repertoire.json",
    "realized_asset_topology": "asset_topology.json",
    "postprocessed_topology": "topology.json",
    "loadflows_ac": "loadflows_ac.hdf5",
    "loadflows_dc": "loadflows_dc.hdf5",
    "loadflows_ac_cross_coupler": "loadflows_ac_cross_coupler.hdf5",
    "loadflows_dc_cross_coupler": "loadflows_dc_cross_coupler.hdf5",
}

CHRONICS_FILE_NAMES module-attribute #

# Filenames (.npy) of the chronics (time-series) arrays, one per
# injection type (loads, generators, static generators, DC lines).
CHRONICS_FILE_NAMES = {
    "load_p": "load_p.npy",
    "gen_p": "gen_p.npy",
    "sgen_p": "sgen_p.npy",
    "dcline_p": "dcline_p.npy",
}

toop_engine_interfaces.backend #

The abstract interface definition for accessing data from pandapower/powerfactory/...

BackendInterface #

Bases: ABC

Interface for the backend.

The task of this interface is to provide routines for accessing data from the grid modelling software (pandapower/powerfactory/...)

Specifically not task of this interface is to perform any validations or processing of the data

This assumes a node-branch model: busbars are nodes, while lines, trafos, etc. are branches. Injections inject onto a node and represent generators, loads, sgens, ...

get_ptdf #

get_ptdf()

Get the PTDF matrix, if it was computed already

For the relevant substations it is important that only node A is given as a column in the reference topology. This is to ensure node A and B are treated properly by the algorithm.

If None is returned, the PTDF matrix will be computed by the solver based on from_node, to_node and susceptance.

RETURNS DESCRIPTION
Float[ndarray, ' n_branch n_node']

The unextended PTDF matrix, not including second nodes for the relevant substations, and not including the PSDF.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_ptdf(self) -> Optional[Float[np.ndarray, " n_branch n_node"]]:
    """Return the precomputed PTDF matrix, or None if none is available.

    When None is returned, the solver derives the PTDF itself from
    from_node, to_node and susceptance.

    For the relevant substations, only node A may appear as a column in the
    reference topology; this ensures nodes A and B are handled correctly by
    the algorithm.

    Returns
    -------
    Float[np.ndarray, " n_branch n_node"]
        The unextended PTDF matrix: no second nodes for the relevant
        substations, and no PSDF included.
    """
    # Default implementation: nothing precomputed, let the solver build it.
    return None

get_psdf #

get_psdf()

Get the PSDF matrix, if it was computed already

If None is returned, the PSDF matrix will be computed by the solver based on shift_angle and susceptance.

This refers to the already reduced PSDF matrix, i.e. without elements that will never have a shift angle. See get_phase_shifters for more information.

RETURNS DESCRIPTION
Float[ndarray, ' n_branch n_phaseshifters']

The PSDF matrix, not including the PTDF.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_psdf(self) -> Optional[Float[np.ndarray, " n_branch n_phaseshifters"]]:
    """Return the precomputed PSDF matrix, or None if none is available.

    When None is returned, the solver derives the PSDF itself from
    shift_angle and susceptance.

    This is the already reduced PSDF, i.e. it excludes elements that can
    never have a shift angle. See get_phase_shifters for details.

    Returns
    -------
    Float[np.ndarray, " n_branch n_phaseshifters"]
        The PSDF matrix, not including the PTDF.
    """
    # Default implementation: nothing precomputed, let the solver build it.
    return None

get_slack abstractmethod #

get_slack()

Get the index of the slack node

Note that the solver does not support distributed slack nodes, if you have a distributed slack, replace all but one slack node by their injections or create a virtual slack node that is connected with same-impedance lines to the other slack nodes.

RETURNS DESCRIPTION
int

The index of the slack node

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_slack(self) -> int:
    """Get the index of the slack node

    Abstract method - concrete backends must implement this.

    Note that the solver does not support distributed slack nodes, if you have a
    distributed slack, replace all but one slack node by their injections or create
    a virtual slack node that is connected with same-impedance lines to the other
    slack nodes.

    Returns
    -------
    int
        The index of the slack node
    """

get_ac_dc_mismatch #

get_ac_dc_mismatch()

Get the AC-DC mismatch for each branch

This is the difference between the AC and DC flow on each branch, i.e. the difference between the AC and DC loadflow results.

This is used in the solver to adjust the DC flow to match the AC flow in the N-0 case. If all zeros are returned, the solver will return pure DC flows.

Positive values mean the AC flow is higher than the DC flow, negative values mean the AC flow is lower than the DC flow.

RETURNS DESCRIPTION
Float[ndarray, ' n_timestep n_branch']

The AC-DC mismatch for each branch and per timestep

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_ac_dc_mismatch(self) -> Float[np.ndarray, " n_timestep n_branch"]:
    """Return the per-branch difference between AC and DC loadflow results.

    The solver uses this to shift the DC flow so it matches the AC flow in
    the N-0 case; an all-zero result yields pure DC flows.

    Sign convention: positive values mean the AC flow exceeds the DC flow,
    negative values mean the AC flow is below the DC flow.

    Returns
    -------
    Float[np.ndarray, " n_timestep n_branch"]
        The AC-DC mismatch for each branch and per timestep
    """
    # Default: no correction - zeros with the same (n_timestep, n_branch)
    # shape as the maximum flows.
    reference_shape = self.get_max_mw_flows().shape
    return np.zeros(reference_shape, dtype=float)

get_max_mw_flows abstractmethod #

get_max_mw_flows()

Get the maximum flow per branch

The timestep dimension is added to represent temperature-dependent capacity limits. If the capacity limits are not temperature-dependent, the same value should be returned for all timesteps.

RETURNS DESCRIPTION
Float[ndarray, ' n_timestep n_branch']

The maximum flow per branch and per timestep

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_max_mw_flows(self) -> Float[np.ndarray, " n_timestep n_branch"]:
    """Get the maximum flow per branch

    Abstract method - concrete backends must implement this.

    The timestep dimension is added to represent temperature-dependent capacity
    limits. If the capacity limits are not temperature-dependent, the same value
    should be returned for all timesteps.

    Returns
    -------
    Float[np.ndarray, " n_timestep n_branch"]
        The maximum flow per branch and per timestep
    """

get_max_mw_flows_n_1 #

get_max_mw_flows_n_1()

Get a varying max flow for N-1 if there is a difference or NaN.

In some circumstances, a higher N-1 load is allowed than N-0 as N-1 leaves some time to address an overload in practice - a line won't melt right away if it's overloaded for a few seconds until the operators can react.

If not overloaded, returns all NaNs and hence the values from get_max_mw_flows will be used.

RETURNS DESCRIPTION
Float[ndarray, ' n_timestep n_branch']

The maximum flow per branch and per timestep if overridden, else NaN

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_max_mw_flows_n_1(self) -> Float[np.ndarray, " n_timestep n_branch"]:
    """Return N-1-specific max flows where they differ from N-0, NaN elsewhere.

    A higher N-1 limit than N-0 can be acceptable because an N-1 overload
    leaves the operators some time to react - a line won't melt right away
    if it is overloaded for a few seconds.

    Entries left as NaN fall back to the values from get_max_mw_flows.

    Returns
    -------
    Float[np.ndarray, " n_timestep n_branch"]
        The maximum flow per branch and per timestep if overridden, else NaN
    """
    # Default: NaN everywhere, i.e. use get_max_mw_flows for every branch.
    template = self.get_max_mw_flows()
    return np.full(template.shape, np.nan, dtype=template.dtype)

get_overload_weights #

get_overload_weights()

Get a factor that the overloads are multiplied with for each branch

This can be used to penalize overloads on certain branches more than on others.

If this function is not overloaded, returns all ones.

RETURNS DESCRIPTION
Float[ndarray, ' n_branch']

The overload weights for each branch

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_overload_weights(self) -> Float[np.ndarray, " n_branch"]:
    """Get a factor that the overloads are multiplied with for each branch

    This can be used to penalize overloads on certain branches more than on others.

    If this function is not overloaded, returns all ones.

    Returns
    -------
    Float[np.ndarray, " n_branch"]
        The overload weights for each branch
    """
    # Default: weight every branch equally (factor 1.0 each).
    return np.ones(self.get_max_mw_flows().shape[-1])

get_n0_n1_max_diff_factors #

get_n0_n1_max_diff_factors()

Get limits for the relative difference between N-0 and N-1 flows.

This is an array of factors to the base case flows. Negative factors or NaN values mean the branch will be ignored and always have a penalty of 0. For example if a branch has a 20 MW diff between N-0 and N-1 in the base case (in the unsplit configuration) and the factor is 2, then the maximum allowed diff for the n0_n1_delta penalty would be 40 MW. If a negative factor is used, this branch has no N-0 to N-1 maximum delta and will always incur a penalty of 0. See dc_solver.jax.aggregate_results.compute_n0_n1_max_diff for how these factors are used

If this function is not overloaded, returns all minus ones (i.e. no branch has a limit).

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_n0_n1_max_diff_factors(self) -> Float[np.ndarray, " n_branch"]:
    """Limits on the relative N-0 to N-1 flow difference, as base-case factors.

    Each entry scales the branch's diff between N-0 and N-1 in the base case
    (unsplit configuration): e.g. a 20 MW base-case diff with factor 2 allows
    a 40 MW diff before the n0_n1_delta penalty applies. Negative or NaN
    entries disable the check for that branch (penalty always 0). See
    dc_solver.jax.aggregate_results.compute_n0_n1_max_diff for how the
    factors are used.

    If this function is not overloaded, returns all minus ones (i.e. no
    branch has a limit).
    """
    # Default: -1.0 for every branch - the delta check is disabled everywhere.
    n_branch = self.get_max_mw_flows().shape[-1]
    return np.full(n_branch, -1.0)

get_cross_coupler_limits #

get_cross_coupler_limits()

Get the cross-coupler limits for each relevant substation.

Returns over all buses to match conventions and if relevant substations are modified independently of the cross-coupler limits.

The limits are a P[MW] Value for each coupler.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_cross_coupler_limits(self) -> Float[np.ndarray, " n_bus"]:
    """Get the cross-coupler limits (a P[MW] value per coupler) for each relevant substation.

    The array spans all buses to match conventions and to stay valid if the
    relevant substations are modified independently of the cross-coupler
    limits.
    """
    # Default: zero limit for every bus; shaped like the relevant-node mask.
    return np.zeros_like(self.get_relevant_node_mask(), dtype=float)

get_susceptances abstractmethod #

get_susceptances()

Get the susceptances of the branches

RETURNS DESCRIPTION
Float[ndarray, ' n_branch']

The susceptances of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_susceptances(self) -> Float[np.ndarray, " n_branch"]:
    """Get the susceptances of the branches

    Abstract method - concrete backends must implement this.

    Returns
    -------
    Float[np.ndarray, " n_branch"]
        The susceptances of the branches
    """

get_from_nodes abstractmethod #

get_from_nodes()

Get the from nodes of the branches

RETURNS DESCRIPTION
Int[ndarray, ' n_branch']

The from nodes of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_from_nodes(self) -> Int[np.ndarray, " n_branch"]:
    """Get the from nodes of the branches

    Abstract method - concrete backends must implement this.

    Returns
    -------
    Int[np.ndarray, " n_branch"]
        The from nodes of the branches
    """

get_to_nodes abstractmethod #

get_to_nodes()

Get the to nodes of the branches

RETURNS DESCRIPTION
Int[ndarray, ' n_branch']

The to nodes of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_to_nodes(self) -> Int[np.ndarray, " n_branch"]:
    """Get the to nodes of the branches

    Abstract method - concrete backends must implement this.

    Returns
    -------
    Int[np.ndarray, " n_branch"]
        The to nodes of the branches
    """

get_controllable_pst_node_mask #

get_controllable_pst_node_mask()

Get the mask of controllable phase shifters over nodes

True means a node is (bogus node and) a controllable phase shifter, i.e. is connected to a branch that is a controllable phase shifter. False means it is a normal node.

RETURNS DESCRIPTION
Bool[ndarray, ' n_node']

The mask of controllable phase shifters over nodes

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_controllable_pst_node_mask(self) -> Bool[np.ndarray, " n_node"]:
    """Get the mask of controllable phase shifters over nodes

    True means a node is (bogus node and) a controllable phase shifter, i.e. is
    connected to a branch that is a controllable phase shifter. False means it
    is a normal node.

    Returns
    -------
    Bool[np.ndarray, " n_node"]
        The mask of controllable phase shifters over nodes
    """
    # TODO: Implement in backends
    # Default: no node is a controllable phase shifter. Use the relevant-node
    # mask as the n_node-shaped template so the result actually matches the
    # declared " n_node" shape (np.zeros([]) produced a 0-d array, not a
    # per-node mask), mirroring get_cross_coupler_limits.
    return np.zeros(self.get_relevant_node_mask().shape, dtype=bool)

get_shift_angles abstractmethod #

get_shift_angles()

Get the shift angles of the branches in degree

The timestep dimension is added to represent time-varying phase shift angles. If the phase shift angles are not time-varying, the same value should be returned for all timesteps.

RETURNS DESCRIPTION
Float[ndarray, ' n_timestep n_branch']

The shift angles of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_shift_angles(self) -> Float[np.ndarray, " n_timestep n_branch"]:
    """Get the shift angles of the branches in degree

    Abstract method - concrete backends must implement this.

    The timestep dimension is added to represent time-varying phase shift angles.
    If the phase shift angles are not time-varying, the same value should be returned
    for all timesteps.

    Returns
    -------
    Float[np.ndarray, " n_timestep n_branch"]
        The shift angles of the branches
    """

get_phase_shift_mask abstractmethod #

get_phase_shift_mask()

Get the mask of phase shifters

True means a branch is a phase shifter, i.e. can have shift_degree != 0 False means it is not a phase shifter. Note that the controllable phase shifters are a subset of this, i.e. not every phase shifter is controllable.

RETURNS DESCRIPTION
Bool[ndarray, ' n_branch']

The mask of phase shifters

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_phase_shift_mask(self) -> Bool[np.ndarray, " n_branch"]:
    """Get the mask of phase shifters

    Abstract method - concrete backends must implement this.

    True means a branch is a phase shifter, i.e. can have shift_degree != 0;
    False means it is not a phase shifter. Note that the controllable phase
    shifters are a subset of this, i.e. not every phase shifter is controllable.

    Returns
    -------
    Bool[np.ndarray, " n_branch"]
        The mask of phase shifters
    """

get_controllable_phase_shift_mask #

get_controllable_phase_shift_mask()

Which of the phase shifters are controllable

This must be a subset of get_phase_shift_mask()

RETURNS DESCRIPTION
Bool[ndarray, ' n_branch']

The mask of controllable phase shifters

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_controllable_phase_shift_mask(self) -> Bool[np.ndarray, " n_branch"]:
    """Mask over branches marking which phase shifters are controllable.

    This must be a subset of get_phase_shift_mask().

    Returns
    -------
    Bool[np.ndarray, " n_branch"]
        The mask of controllable phase shifters
    """
    # Default: no phase shifter is controllable (all-False, same shape and
    # dtype as the phase-shift mask).
    phase_shift_mask = self.get_phase_shift_mask()
    return np.zeros(phase_shift_mask.shape, dtype=phase_shift_mask.dtype)

get_phase_shift_taps #

get_phase_shift_taps()

Return the possible tap positions of each controllable PST.

The outer list has as many entries as there are controllable PSTs (see controllable_phase_shift_mask). The inner np array has as many entries as there are taps for the given PST with each value representing the angle shift for the given tap position. The taps are ordered smallest to largest angle shift. Each controllable PST must have at least one tap position.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_phase_shift_taps(self) -> list[Float[np.ndarray, " n_tap_positions"]]:
    """Return the possible tap positions of each controllable PST.

    One list entry per controllable PST (see controllable_phase_shift_mask);
    each entry is an array with one value per tap, giving the angle shift of
    that tap position, ordered smallest to largest. Every controllable PST
    must have at least one tap position.
    """
    # Default when not overridden: one fixed tap per controllable PST, taken
    # from the shift angles of the zeroth timestep.
    controllable = self.get_controllable_phase_shift_mask()
    default_angles = self.get_shift_angles()[0, controllable]
    return [np.array([angle]) for angle in default_angles]

get_phase_shift_starting_taps #

get_phase_shift_starting_taps()

Get the starting tap position for each controllable PST, given as an integer index into pst_tap_values.

Note that taps in the original grid model might not start at zero, while in our optimization we assume taps to always be zero terminated. To translate back into original grid model, add get_phase_shift_low_taps.

The returned array has one entry per controllable PST (see controllable_phase_shift_mask), each an integer index into that PST's tap values as returned by get_phase_shift_taps.

If this function is not overloaded, it is assumed that all controllable PSTs start at their lowest tap position (i.e. index 0).

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_phase_shift_starting_taps(self) -> Int[np.ndarray, " n_controllable_pst"]:
    """Get the starting tap position for each controllable PST, given as an integer index into pst_tap_values.

    Note that taps in the original grid model might not start at zero, while in our optimization we assume taps to always
    be zero terminated. To translate back into original grid model, add get_phase_shift_low_taps.

    The returned array has one entry per controllable PST (see
    controllable_phase_shift_mask), each an integer index into that PST's tap
    values as returned by get_phase_shift_taps.

    If this function is not overloaded, it is assumed that all controllable PSTs start at their lowest tap position
    (i.e. index 0).
    """
    # Default: every controllable PST starts at tap index 0.
    # np.count_nonzero counts the controllable PSTs in one C-level pass,
    # instead of iterating the mask element-wise as sum() does.
    return np.zeros(np.count_nonzero(self.get_controllable_phase_shift_mask()), dtype=int)

get_phase_shift_low_taps #

get_phase_shift_low_taps()

Get the lowest tap position in the original grid model

Original taps are needed so taps as integer indices into tap values can be converted back to the original tap positions by tap + low_tap

If this function is not overloaded, it is assumed that all controllable PSTs have a low tap of 0.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_phase_shift_low_taps(self) -> Int[np.ndarray, " n_controllable_psts"]:
    """Get the lowest tap position of each controllable PST in the original grid model.

    The low taps are needed so taps given as integer indices into the tap
    values can be mapped back to the original tap positions via tap + low_tap.

    If this function is not overloaded, every controllable PST is assumed to
    have a low tap of 0.
    """
    # Default: one zero per controllable PST.
    n_controllable = int(self.get_controllable_phase_shift_mask().sum())
    return np.zeros(n_controllable, dtype=int)

get_relevant_node_mask abstractmethod #

get_relevant_node_mask()

Get true if a node is part of the relevant nodes

This refers to the node A (the node that is present in the un-extended PTDF) of the relevant substations. The relevant nodes are those that can be split later on in the solver.

RETURNS DESCRIPTION
Bool[ndarray, ' n_node']

The mask over nodes, indicating if they are relevant (splittable)

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_relevant_node_mask(self) -> Bool[np.ndarray, " n_node"]:
    """Get true if a node is part of the relevant nodes

    Abstract method - concrete backends must implement this.

    This refers to the node A (the node that is present in the un-extended PTDF) of the
    relevant substations. The relevant nodes are those that can be split later on
    in the solver.

    Returns
    -------
    Bool[np.ndarray, " n_node"]
        The mask over nodes, indicating if they are relevant (splittable)
    """

get_monitored_branch_mask abstractmethod #

get_monitored_branch_mask()

Get the mask of monitored branches for the reward calculation

True means a branch is monitored, False means it is not monitored

RETURNS DESCRIPTION
Bool[ndarray, ' n_branch']

The mask of monitored branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_monitored_branch_mask(self) -> Bool[np.ndarray, " n_branch"]:
    """Get the mask of monitored branches for the reward calculation

    Abstract method - concrete backends must implement this.

    True means a branch is monitored, False means it is not monitored

    Returns
    -------
    Bool[np.ndarray, " n_branch"]
        The mask of monitored branches
    """

get_branches_in_maintenance abstractmethod #

get_branches_in_maintenance()

Get the mask of branches in maintenance

True means a branch is in maintenance, False means it is not in maintenance

The timestep dimension is added to represent time-varying maintenance schedules. If the maintenance schedules are not time-varying, the same value should be returned for all timesteps.

RETURNS DESCRIPTION
Bool[ndarray, ' n_timestep n_branch']

The mask of branches in maintenance

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_branches_in_maintenance(
    self,
) -> Bool[np.ndarray, " n_timestep n_branch"]:
    """Get the mask of branches in maintenance

    Abstract method - concrete backends must implement this.

    True means a branch is in maintenance, False means it is not in maintenance

    The timestep dimension is added to represent time-varying maintenance schedules.
    If the maintenance schedules are not time-varying, the same value should be returned
    for all timesteps.

    Returns
    -------
    Bool[np.ndarray, " n_timestep n_branch"]
        The mask of branches in maintenance
    """

get_disconnectable_branch_mask abstractmethod #

get_disconnectable_branch_mask()

Get the mask of disconnectable branches

True means a branch is disconnectable as a remedial action, False means it must stay online

RETURNS DESCRIPTION
Bool[ndarray, ' n_branch']

The mask of disconnectable branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_disconnectable_branch_mask(self) -> Bool[np.ndarray, " n_branch"]:
    """Get the mask of disconnectable branches

    Abstract method - concrete backends must implement this.

    True means a branch is disconnectable as a remedial action, False means it must stay online

    Returns
    -------
    Bool[np.ndarray, " n_branch"]
        The mask of disconnectable branches
    """

get_outaged_branch_mask abstractmethod #

get_outaged_branch_mask()

Get the mask of outaged branches for the N-1 computation

True means a branch is outaged, False means it is not outaged

RETURNS DESCRIPTION
Bool[ndarray, ' n_branch']

The mask of outaged branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_outaged_branch_mask(self) -> Bool[np.ndarray, " n_branch"]:
    """Get the mask of outaged branches for the N-1 computation

    Abstract method - concrete backends must implement this.

    True means a branch is outaged, False means it is not outaged

    Returns
    -------
    Bool[np.ndarray, " n_branch"]
        The mask of outaged branches
    """

get_outaged_injection_mask abstractmethod #

get_outaged_injection_mask()

Get the mask of outaged injections for the N-1 computation

True means an injection is outaged, False means it is not outaged

RETURNS DESCRIPTION
Bool[ndarray, ' n_injection']

The mask of outaged injections

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_outaged_injection_mask(self) -> Bool[np.ndarray, " n_injection"]:
    """Get the mask of outaged injections for the N-1 computation

    Abstract method - concrete backends must implement this.

    True means an injection is outaged, False means it is not outaged

    Returns
    -------
    Bool[np.ndarray, " n_injection"]
        The mask of outaged injections
    """

get_multi_outage_branches abstractmethod #

get_multi_outage_branches()

Get the mask of outaged branches for potential multi-outages

True means a branch is outaged, False means it is not outaged.

get_multi_outage_branches, get_multi_outage_nodes and get_multi_outage_names have to return the same first dimension, i.e. the same number of multi-outages.

RETURNS DESCRIPTION
Bool[ndarray, ' n_multi_outages n_branch']

The mask of outaged branches for every multi-outage

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_multi_outage_branches(
    self,
) -> Bool[np.ndarray, " n_multi_outages n_branch"]:
    """Get the mask of outaged branches for potential multi-outages

    Abstract method - concrete backends must implement this.

    True means a branch is outaged, False means it is not outaged.

    get_multi_outage_branches, get_multi_outage_nodes and get_multi_outage_names have to return
    the same first dimension, i.e. the same number of multi-outages.

    Returns
    -------
    Bool[np.ndarray, " n_multi_outages n_branch"]
        The mask of outaged branches for every multi-outage
    """

get_multi_outage_nodes abstractmethod #

get_multi_outage_nodes()

Get the mask of outaged nodes for potential multi-outages

True means a node is outaged, False means it is not outaged.

get_multi_outage_branches, get_multi_outage_nodes and get_multi_outage_names have to return the same first dimension, i.e. the same number of multi-outages.

RETURNS DESCRIPTION
Bool[ndarray, ' n_multi_outages n_node']

The mask of outaged nodes for every multi-outage

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_multi_outage_nodes(
    self,
) -> Bool[np.ndarray, " n_multi_outages n_node"]:
    """Get the mask of outaged nodes for potential multi-outages

    Abstract method - concrete backends must implement this.

    True means a node is outaged, False means it is not outaged.

    get_multi_outage_branches, get_multi_outage_nodes and get_multi_outage_names have to return
    the same first dimension, i.e. the same number of multi-outages.

    Returns
    -------
    Bool[np.ndarray, " n_multi_outages n_node"]
        The mask of outaged nodes for every multi-outage
    """

get_injection_nodes abstractmethod #

get_injection_nodes()

Get the node index of the injections

RETURNS DESCRIPTION
Int[ndarray, ' n_injection']

The node index that the injection injects onto

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_injection_nodes(self) -> Int[np.ndarray, " n_injection"]:
    """Get the node index of the injections

    Abstract method - concrete backends must implement this.

    Returns
    -------
    Int[np.ndarray, " n_injection"]
        The node index that the injection injects onto
    """

get_mw_injections abstractmethod #

get_mw_injections()

Get the MW injections of the injections

The timestep dimension is added to represent time-varying injections. If the injections are not time-varying, the same value should be returned for all timesteps.

RETURNS DESCRIPTION
Float[ndarray, ' n_timestep n_injection']

The MW injections of the injections

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_mw_injections(self) -> Float[np.ndarray, " n_timestep n_injection"]:
    """Get the MW injections of the injections.

    The timestep dimension is added to represent time-varying injections.
    If the injections are not time-varying, the same value should be returned
    for all timesteps.

    Returns
    -------
    Float[np.ndarray, " n_timestep n_injection"]
        The MW injections of the injections, one row per timestep
    """

get_base_mva abstractmethod #

get_base_mva()

Get the baseMVA of the grid

RETURNS DESCRIPTION
float

The base MVA of the grid

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_base_mva(self) -> float:
    """Get the baseMVA of the grid.

    Returns
    -------
    float
        The base MVA of the grid (the reference power used for per-unit quantities)
    """

get_asset_topology #

get_asset_topology()

Get the asset topology of the grid.

If given, the asset topology for the grid can be returned, describing more information about the physical layout of the stations

RETURNS DESCRIPTION
Optional[Topology]

The asset topology of the grid

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_asset_topology(self) -> Optional[Topology]:
    """Get the asset topology of the grid.

    If given, the asset topology for the grid can be returned, describing more
    information about the physical layout of the stations

    Returns
    -------
    Optional[Topology]
        The asset topology of the grid
    """
    return None

get_node_ids abstractmethod #

get_node_ids()

Get the ids of the nodes as a Sequence of length N_node

RETURNS DESCRIPTION
Union[Sequence[str], Sequence[int]]

The ids of the nodes

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_node_ids(
    self,
) -> Union[Sequence[str], Sequence[int]]:
    """Get the ids of the nodes as a Sequence of length N_node.

    Returns
    -------
    Union[Sequence[str], Sequence[int]]
        The ids of the nodes, one entry per node
    """

get_branch_ids abstractmethod #

get_branch_ids()

Get the ids of the branches as a Sequence of length N_branch

RETURNS DESCRIPTION
Union[Sequence[str], Sequence[int]]

The ids of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_branch_ids(
    self,
) -> Union[Sequence[str], Sequence[int]]:
    """Get the ids of the branches as a Sequence of length N_branch.

    Returns
    -------
    Union[Sequence[str], Sequence[int]]
        The ids of the branches, one entry per branch
    """

get_injection_ids abstractmethod #

get_injection_ids()

Get the ids of the injections as a Sequence of length N_injection

RETURNS DESCRIPTION
Union[Sequence[str], Sequence[int]]

The ids of the injections

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_injection_ids(
    self,
) -> Union[Sequence[str], Sequence[int]]:
    """Get the ids of the injections as a Sequence of length N_injection.

    Returns
    -------
    Union[Sequence[str], Sequence[int]]
        The ids of the injections, one entry per injection
    """

get_multi_outage_ids abstractmethod #

get_multi_outage_ids()

Get the ids of the multi-outages as a Sequence of length N_multi_outages

RETURNS DESCRIPTION
Union[Sequence[str], Sequence[int]]

The ids of the multi-outages

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_multi_outage_ids(self) -> Union[Sequence[str], Sequence[int]]:
    """Get the ids of the multi-outages as a Sequence of length N_multi_outages.

    Returns
    -------
    Union[Sequence[str], Sequence[int]]
        The ids of the multi-outages, one entry per multi-outage
    """

get_node_names abstractmethod #

get_node_names()

Get the names of the nodes as a Sequence of length N_node

RETURNS DESCRIPTION
Sequence[str]

The names of the nodes

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_node_names(self) -> Sequence[str]:
    """Get the names of the nodes as a Sequence of length N_node.

    Returns
    -------
    Sequence[str]
        The names of the nodes, one entry per node
    """

get_branch_names abstractmethod #

get_branch_names()

Get the names of the branches as a Sequence of length N_branch

RETURNS DESCRIPTION
Sequence[str]

The names of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_branch_names(self) -> Sequence[str]:
    """Get the names of the branches as a Sequence of length N_branch.

    Returns
    -------
    Sequence[str]
        The names of the branches, one entry per branch
    """

get_injection_names abstractmethod #

get_injection_names()

Get the names of the injections

RETURNS DESCRIPTION
Sequence[str]

The names of the injections

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_injection_names(self) -> Sequence[str]:
    """Get the names of the injections.

    Returns
    -------
    Sequence[str]
        The names of the injections, one entry per injection
    """

get_multi_outage_names abstractmethod #

get_multi_outage_names()

Get the names of the multi-outages as a Sequence of length N_multi_outages

If more than one element is involved in a multi-outage you can return a concatenated name

RETURNS DESCRIPTION
Sequence[str]

The names of the multi-outages

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_multi_outage_names(self) -> Sequence[str]:
    """Get the names of the multi-outages as a Sequence of length N_multi_outages.

    If more than one element is involved in a multi-outage you can return a concatenated name.

    Returns
    -------
    Sequence[str]
        The names of the multi-outages
    """

get_branch_types abstractmethod #

get_branch_types()

Get the type of the branches

RETURNS DESCRIPTION
Sequence[str]

The type of the branches

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_branch_types(self) -> Sequence[str]:
    """Get the type of the branches.

    Returns
    -------
    Sequence[str]
        The type of the branches, one entry per branch
    """

get_node_types abstractmethod #

get_node_types()

Get the type of the nodes

RETURNS DESCRIPTION
Sequence[str]

The type of the nodes

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_node_types(self) -> Sequence[str]:
    """Get the type of the nodes.

    Returns
    -------
    Sequence[str]
        The type of the nodes, one entry per node
    """

get_injection_types abstractmethod #

get_injection_types()

Get the type of the injections

RETURNS DESCRIPTION
Sequence[str]

The type of the injections

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_injection_types(self) -> Sequence[str]:
    """Get the type of the injections.

    Returns
    -------
    Sequence[str]
        The type of the injections, one entry per injection
    """

get_multi_outage_types abstractmethod #

get_multi_outage_types()

Get the type of the multi-outages as a Sequence of length N_multi_outages

RETURNS DESCRIPTION
Sequence[str]

The type of the multi-outages

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_multi_outage_types(self) -> Sequence[str]:
    """Get the type of the multi-outages as a Sequence of length N_multi_outages.

    Returns
    -------
    Sequence[str]
        The type of the multi-outages, one entry per multi-outage
    """

get_metadata abstractmethod #

get_metadata()

Can be used to return metadata or additional information about the grid.

This is not used by the solver but rather to ease postprocessing and validation. You can return an empty dict if you don't want to use this field.

RETURNS DESCRIPTION
dict

The metadata of the grid

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
@abstractmethod
def get_metadata(self) -> dict:
    """Can be used to return metadata or additional information about the grid.

    This is not used by the solver but rather to ease postprocessing and validation. You can
    return an empty dict if you don't want to use this field.

    Returns
    -------
    dict
        The metadata of the grid
    """

get_busbar_outage_map #

get_busbar_outage_map()

Get the mapping of stations to busbars for the busbar-outages

The key of the dict is the station's grid_model_id and the value is a list of grid_model_ids of the busbars that have to be outaged. If this method is not overloaded, all the physical busbars of the relevant stations will be outaged.

RETURNS DESCRIPTION
Optional[dict[str, Sequence[str]]]

The mapping of busbar-outages to the relevant nodes

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/backend.py
def get_busbar_outage_map(
    self,
) -> Optional[dict[str, Sequence[str]]]:
    """Get the mapping of stations to busbars for the busbar-outages.

    The key of the dict is the station's grid_model_id and the value is a list of grid_model_ids
    of the busbars that have to be outaged. If this method is not overloaded, all the physical
    busbars of the relevant stations will be outaged.

    Returns
    -------
    Optional[dict[str, Sequence[str]]]
        The mapping of busbar-outages to the relevant nodes, or None to use the default
    """
    # Default implementation: no explicit mapping, which signals callers to fall
    # back to outaging all physical busbars of the relevant stations.
    return None

toop_engine_interfaces.types #

Define global type aliases.

Currently this holds only the MetricType, a Literal of all possible metrics.

The meaning of the metrics is documented in metrics.md in the docs folder.

MatrixMetric module-attribute #

# Metrics computed from the branch-flow result matrices. The "_n_0" suffix
# refers to the base case (no outage), "_n_1" to the N-1 contingency cases.
# The exact meaning of each metric is documented in metrics.md in the docs folder.
MatrixMetric = Literal[
    "max_flow_n_0",
    "median_flow_n_0",
    "overload_energy_n_0",
    "underload_energy_n_0",
    "overload_energy_limited_n_0",
    "exponential_overload_energy_n_0",
    "exponential_overload_energy_limited_n_0",
    "critical_branch_count_n_0",
    "critical_branch_count_limited_n_0",
    "max_flow_n_1",
    "median_flow_n_1",
    "overload_energy_n_1",
    "underload_energy_n_1",
    "overload_energy_limited_n_1",
    "exponential_overload_energy_n_1",
    "exponential_overload_energy_limited_n_1",
    "critical_branch_count_n_1",
    "critical_branch_count_limited_n_1",
    "top_k_overloads_n_1",
    "cumulative_overload_n_0",
    "cumulative_overload_n_1",
]

OperationMetric module-attribute #

# Metrics describing the switching operations themselves rather than loadflow
# results; meanings are documented in metrics.md in the docs folder.
OperationMetric = Literal[
    "switching_distance",
    "split_subs",
    "disconnected_branches",
]

OtherMetric module-attribute #

# Remaining metrics that fit neither the matrix nor the operation category;
# meanings are documented in metrics.md in the docs folder.
OtherMetric = Literal[
    "n0_n1_delta",
    "cross_coupler_flow",
    "n_2_penalty",
    "bb_outage_penalty",
    "bb_outage_overload",
    "bb_outage_grid_splits",
    "max_va_across_coupler",
    "max_va_diff_n_0",
    "max_va_diff_n_1",
    "overload_current_n_0",
    "overload_current_n_1",
    "non_converging_loadflows",
    "fitness_dc",
]

MetricType module-attribute #

# Union of all metric categories. Nested Literals are flattened (PEP 586), so
# this is equivalent to a single Literal over every metric name defined above.
MetricType = Literal[
    MatrixMetric, OperationMetric, OtherMetric
]

toop_engine_interfaces.stored_action_set #

Holds a format for storing the action set for later use in postprocessing.

This is different from the jax-internal action set as defined in jax/types.py where only jax-relevant information is stored, but is instead aimed at use in postprocessing and visualization. Instead of just storing the electrical switching state, this bases on the asset topology to store physical switchings to make a translation to .dgs or other formats easier.

One of the decisions to take was whether to use a single action set for all timesteps or a different one for each timestep. As the jax part currently also only supports one action set for all timesteps, we decide to mirror this for the time being, i.e. we do not store strategies but topologies in the action set.

Furthermore, it should also be possible to use a global action set if necessary. Meaning, by default an action is substation-local, but it can span multiple substations as well. Using a format that is suitable for both options is desirable for easier collaboration.

Another question was whether to store the switching distance and busbar information in the action set, but the switching distance can be trivially recomputed by using the station_diff between the starting topology and the station in the action set. BB outage information can also be retrieved from the asset topology.

There is a slim hope of storing the action set independent of the grid state but based on the master grid, however right now there is a fundamental way that 'binds' an action set to the specific grid it has been computed on: During the enumerations, all electrical actions are enumerated and then physical realizations are found for it based on heuristics. These heuristics take the grid state into account, so it could be that an electrical action can not be realized the same way if maintenances are active. Hence, for the moment, it is no problem to tie the initial topology into the action set.

PSTRange #

Bases: GridElement

Phase shifting transformers can be set within the scope of non-costly optimization.

A PST has a list of taps, each with an angle shift.

starting_tap instance-attribute #

starting_tap

The tap the PST was set to before optimization. To filter out actions that do not change anything in the UI, this is required.

low_tap instance-attribute #

low_tap

The lowest tap the PST supports

high_tap instance-attribute #

high_tap

The highest tap the PST supports

id instance-attribute #

id

The id of the element. For powsybl grids this is the global string id, for pandapower this is the integer index into the dataframe

name class-attribute instance-attribute #

name = ''

The name of the element. This is optional, but can be used to provide a more human-readable name for the element.

type instance-attribute #

type

For pandapower, we need to further specify a type which corresponds to the table pandapower stores the information in. Valid tables are 'line', 'trafo', 'ext_grid', 'gen', 'load', 'shunt', ... For powsybl, this is not strictly needed to identify the element however it makes it easier. In that case, type will be something like TIE_LINE, LINE, TWO_WINDING_TRANSFORMER, GENERATOR, etc.

kind instance-attribute #

kind

The kind of the element. Usually these are handled differently in the grid modelling software, so it can make assembling an N-1 analysis easier if it is known if the element is a branch, bus or injection. This could be inferred from the type, however for convenience it is stored separately.

For the bus type there is some potential confusion in powsybl. In pandapower, this always refers to the net.bus df. In powsybl in a bus/branch model, there are no busbar sections in powsybl, i.e. net.get_node_breaker_topology does not deliver busbar sections. Meaning, the "bus" type refers to the net.get_bus_breaker_topology buses if it's a bus/breaker topology bus. If it's a node/breaker topology, then "bus" refers to the busbar section.

HVDCRange #

Bases: GridElement

High voltage direct current lines can be set within the scope of non-costly optimization.

An HVDC has a minimum and maximum power setpoint

min_power instance-attribute #

min_power

The lowest power setpoint the HVDC supports

max_power instance-attribute #

max_power

The highest power setpoint the HVDC supports

id instance-attribute #

id

The id of the element. For powsybl grids this is the global string id, for pandapower this is the integer index into the dataframe

name class-attribute instance-attribute #

name = ''

The name of the element. This is optional, but can be used to provide a more human-readable name for the element.

type instance-attribute #

type

For pandapower, we need to further specify a type which corresponds to the table pandapower stores the information in. Valid tables are 'line', 'trafo', 'ext_grid', 'gen', 'load', 'shunt', ... For powsybl, this is not strictly needed to identify the element however it makes it easier. In that case, type will be something like TIE_LINE, LINE, TWO_WINDING_TRANSFORMER, GENERATOR, etc.

kind instance-attribute #

kind

The kind of the element. Usually these are handled differently in the grid modelling software, so it can make assembling an N-1 analysis easier if it is known if the element is a branch, bus or injection. This could be inferred from the type, however for convenience it is stored separately.

For the bus type there is some potential confusion in powsybl. In pandapower, this always refers to the net.bus df. In powsybl in a bus/branch model, there are no busbar sections in powsybl, i.e. net.get_node_breaker_topology does not deliver busbar sections. Meaning, the "bus" type refers to the net.get_bus_breaker_topology buses if it's a bus/breaker topology bus. If it's a node/breaker topology, then "bus" refers to the busbar section.

ActionSet #

Bases: BaseModel

A collection of actions in the form of asset topology stations.

We make a convention that the beginning of the action set always includes substation local actions and the global actions are at the end.

starting_topology instance-attribute #

starting_topology

How the grid looked like when the action set was first generated.

connectable_branches instance-attribute #

connectable_branches

A list of assets that can be connected as a remedial action.

disconnectable_branches instance-attribute #

disconnectable_branches

A list of assets that can be disconnected as a remedial action. Currently the DC solver supports only branches.

pst_ranges instance-attribute #

pst_ranges

A list of phase shifting transformers that can be set as a remedial action.

hvdc_ranges instance-attribute #

hvdc_ranges

A list of high voltage direct current lines that can be set as a remedial action. This is currently not implemented yet in the solver.

local_actions instance-attribute #

local_actions

A list of split/reconfiguration actions that affect exactly one substation.

global_actions instance-attribute #

global_actions

A list of split/reconfiguration actions that affect multiple substations. Each action contains a list of affected stations.

load_action_set_fs #

load_action_set_fs(filesystem, file_path)

Load an action set from a file system.

PARAMETER DESCRIPTION
filesystem

The file system to use to load the action set.

TYPE: AbstractFileSystem

file_path

The path to the file containing the action set in json format.

TYPE: Union[str, Path]

RETURNS DESCRIPTION
ActionSet

The action set loaded from the file.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/stored_action_set.py
def load_action_set_fs(filesystem: AbstractFileSystem, file_path: Union[str, Path]) -> ActionSet:
    """Read an ActionSet from a json file located on the given file system.

    Parameters
    ----------
    filesystem : AbstractFileSystem
        The file system to use to load the action set.
    file_path : Union[str, Path]
        The path to the file containing the action set in json format.

    Returns
    -------
    ActionSet
        The action set loaded from the file.
    """
    # Read the raw json first, then validate it into the pydantic model.
    with filesystem.open(str(file_path), "r") as handle:
        raw_json = handle.read()
    return ActionSet.model_validate_json(raw_json)

load_action_set #

load_action_set(file_path)

Load an action set from a file.

PARAMETER DESCRIPTION
file_path

The path to the file containing the action set in json format.

TYPE: Union[str, Path]

RETURNS DESCRIPTION
ActionSet

The action set loaded from the file.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/stored_action_set.py
def load_action_set(file_path: Union[str, Path]) -> ActionSet:
    """Load an action set from a json file on the local file system.

    Parameters
    ----------
    file_path : Union[str, Path]
        The path to the file containing the action set in json format.

    Returns
    -------
    ActionSet
        The action set loaded from the file.
    """
    # Delegate to the filesystem-aware loader with a local filesystem.
    local_fs = LocalFileSystem()
    return load_action_set_fs(local_fs, file_path)

save_action_set #

save_action_set(file_path, action_set)

Save an action set to a file.

PARAMETER DESCRIPTION
file_path

The path to the file to save the action set to in json format.

TYPE: Union[str, Path]

action_set

The action set to save.

TYPE: ActionSet

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/stored_action_set.py
def save_action_set(file_path: Union[str, Path], action_set: ActionSet) -> None:
    """Save an action set to a file in json format.

    Parameters
    ----------
    file_path : Union[str, Path]
        The path to the file to save the action set to in json format.
    action_set : ActionSet
        The action set to save.
    """
    # Serialize via the generic pydantic helper on a local filesystem.
    local_fs = LocalFileSystem()
    save_pydantic_model_fs(filesystem=local_fs, file_path=file_path, pydantic_model=action_set)

random_actions #

random_actions(action_set, rng, n_split_subs)

Generate a random topology from the action set.

Makes sure to sample each substation at most once.

PARAMETER DESCRIPTION
action_set

The action set to generate the random topology from.

TYPE: ActionSet

rng

The random number generator to use.

TYPE: Generator

n_split_subs

The number of substations to split. If this is more than total number of substations, all substations are split. (i.e. will be clipped to the number of substations)

TYPE: int

RETURNS DESCRIPTION
list[int]

A list of indices of the action set with substations to split.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/stored_action_set.py
def random_actions(action_set: ActionSet, rng: np.random.Generator, n_split_subs: int) -> list[int]:
    """Generate a random topology from the action set.

    Makes sure to sample each substation at most once.

    Parameters
    ----------
    action_set : ActionSet
        The action set to generate the random topology from.
    rng : np.random.Generator
        The random number generator to use.
    n_split_subs : int
        The number of substations to split. If this is more than total number of substations, all substations are split.
        (i.e. will be clipped to the number of substations)

    Returns
    -------
    list[int]
        A list of indices of the action set with substations to split.
    """
    # First sample the substations to split
    substations = list(set(station.grid_model_id for station in action_set.local_actions))
    sub_choice = rng.choice(substations, size=min(n_split_subs, len(substations)), replace=False).tolist()

    # Then sample an action for each substation
    actions = []
    for grid_model_id in sub_choice:
        applicable_indices = [
            i for i, station in enumerate(action_set.local_actions) if station.grid_model_id == grid_model_id
        ]
        actions.append(rng.choice(applicable_indices).item())
    return actions

Messages#

toop_engine_interfaces.messages.preprocess #

toop_engine_interfaces.messages.lf_service #

toop_engine_interfaces.messages.lf_service.loadflow_commands #

Describes the interfaces for a loadflow service providing N-1 computations in AC or DC to customers.

The communication follows a 2 step pattern:

  1. Grid load - Load a grid file into the engine and potentially perform some preprocessing. The engine should return a grid reference upon this call, which is used in the job to reference the grid. Multiple grids can be loaded at the same time, it is the responsibility of the engine to perform memory management (i.e. swap out a grid to disk if memory is full).
  2. Execute jobs - Run a loadflow job on the engine. The job references the grid that was loaded in step 1. A call to this can contain multiple jobs, allowing the engine to parallelize over jobs in addition to parallelizing over timesteps/outages.

The LoadflowEngine protocol describes this two step process in detail.

BaseFilter #

Bases: BaseModel, ABC

A base class for filters.

The idea behind filters is to implement logics that can not be implemented with the monitored elements directly but that are dependent on the loadflow results

This is not to be used directly, but to be subclassed.

WorstContingencyBranchFilter #

Bases: BaseFilter

If this filter is applied, it will reduce the branch results to only the worst N-1 case/side per branch and timestep.

Worst is determined with respect to the loading value. Branches that don't have a rating will never be returned.

If there are multiple worst N-1 cases/sides that produce a tie, one will be chosen at random. This filtering happens on a per-timestep basis, meaning for every timestep there should be as many results as monitored branches, but they can refer to different N-1 cases.

return_basecase class-attribute instance-attribute #

return_basecase = False

Whether to return the basecase still. If this is set to True, the basecase will always be returned even if it is not the worst case.

filter_type class-attribute instance-attribute #

filter_type = 'worst_contingency'

An identifier for the discriminated union

VoltageBandFilter #

Bases: BaseFilter

If this filter is applied, it will reduce the node results to only the results that are outside of a specified band.

The band is defined by a minimum and maximum p.u. value for all nodes

return_basecase class-attribute instance-attribute #

return_basecase = False

Whether to return the basecase at all times. If this is set to True, the basecase will always be returned even if it is inside the band and hence should be filtered out.

v_min instance-attribute #

v_min

The minimum voltage in p.u. - values below this will be returned.

v_max instance-attribute #

v_max

The maximum voltage in p.u. - values above this will be returned

filter_type class-attribute instance-attribute #

filter_type = 'voltage_band'

An identifier for the discriminated union

PercentCutoffBranchFilter #

Bases: BaseFilter

Filter, if applied returns only branch results that are above a loading threshold.

Elements for which no loading could be computed (e.g. due to missing ratings) are never returned. This filtering happens on a per-timestep basis, i.e. if a branch/contingency is above the threshold in one timestep, it will be returned in exactly that timestep.

loading_threshold instance-attribute #

loading_threshold

The loading threshold in percent. Only branches with a loading above this threshold are returned.

filter_type class-attribute instance-attribute #

filter_type = 'percent_cutoff'

An identifier for the discriminated union

Job #

Bases: BaseModel

A job constitutes a single workload and will produce a LoadflowResults object.

There are different types of jobs based on the workload, the simple being a base job with no changes to the base grid.

id instance-attribute #

id

A unique identifier for the job. This is used to reference the job in the results.

branch_filter class-attribute instance-attribute #

branch_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the branch results table. Exactly one filter can be active per table and job

node_filter class-attribute instance-attribute #

node_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the node results table. Exactly one filter can be active per table and job

job_type class-attribute instance-attribute #

job_type = 'bare'

An identifier for the discriminated union

timestep_subselection class-attribute instance-attribute #

timestep_subselection = None

If this is set, only the timesteps in this list are computed. If this is not set, all timesteps are computed. Timesteps are referenced by their index in the grid file, starting at 0.

JobWithSwitchingStrategy #

Bases: Job

A job that includes a switching strategy.

This strategy shall be applied before the loadflow computation and might alter the topology of the grid.

strategy instance-attribute #

strategy

The switching strategy that is to be applied before the loadflow computation

job_type class-attribute instance-attribute #

job_type = 'strategy'

An identifier for the discriminated union

id instance-attribute #

id

A unique identifier for the job. This is used to reference the job in the results.

branch_filter class-attribute instance-attribute #

branch_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the branch results table. Exactly one filter can be active per table and job

node_filter class-attribute instance-attribute #

node_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the node results table. Exactly one filter can be active per table and job

timestep_subselection class-attribute instance-attribute #

timestep_subselection = None

If this is set, only the timesteps in this list are computed. If this is not set, all timesteps are computed. Timesteps are referenced by their index in the grid file, starting at 0.

JobWithCGMESChanges #

Bases: Job

A job that includes changes in CGMES format. This is only applicable if the grid is a CGMES grid

tp_files instance-attribute #

tp_files

The file including the topology changes that shall be applied.

There must be as many entries as timesteps in the grid, but the same file can be referenced multiple times.

ssh_files instance-attribute #

ssh_files

The file including the state/injection changes that shall be applied.

There must be as many entries as timesteps in the grid, but the same file can be referenced multiple times.

job_type class-attribute instance-attribute #

job_type = 'cgmes_changes'

An identifier for the discriminated union

id instance-attribute #

id

A unique identifier for the job. This is used to reference the job in the results.

branch_filter class-attribute instance-attribute #

branch_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the branch results table. Exactly one filter can be active per table and job

node_filter class-attribute instance-attribute #

node_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the node results table. Exactly one filter can be active per table and job

timestep_subselection class-attribute instance-attribute #

timestep_subselection = None

If this is set, only the timesteps in this list are computed. If this is not set, all timesteps are computed. Timesteps are referenced by their index in the grid file, starting at 0.

InjectionAddition #

Bases: BaseModel

A single addition of an injection at a node.

This feature only supports PQ nodes; if applied to a branch, pv node or slack node, the engine should ignore this addition and log a warning.

Positive values shall have the same effect as sgens, i.e. power is produced, while negative values will have the same effect as loads, i.e. power is consumed.

node instance-attribute #

node

The node to which the injection is added

p_mw instance-attribute #

p_mw

The active power in MW that is added to the node

q_mw instance-attribute #

q_mw

The reactive power that is added to the node

timestep_subselection class-attribute instance-attribute #

timestep_subselection = None

If this is given, the addition only happens in the timesteps that are in this list. If this is not given, the addition happens in all timesteps.

JobWithInjectionAdditions #

Bases: Job

Adds a constant injection to a node in the grid.

This feature assumes all injections are added to PQ nodes - otherwise they will be ignored.

Positive values shall have the same effect as sgens, i.e. power is produced, while negative values will have the same effect as loads, i.e. power is consumed.

additions instance-attribute #

additions

The injections that are added to the grid

job_type class-attribute instance-attribute #

job_type = 'injection_additions'

An identifier for the discriminated union

id instance-attribute #

id

A unique identifier for the job. This is used to reference the job in the results.

branch_filter class-attribute instance-attribute #

branch_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the branch results table. Exactly one filter can be active per table and job

node_filter class-attribute instance-attribute #

node_filter = Field(
    default=None, discriminator="filter_type"
)

Filters for the node results table. Exactly one filter can be active per table and job

timestep_subselection class-attribute instance-attribute #

timestep_subselection = None

If this is set, only the timesteps in this list are computed. If this is not set, all timesteps are computed. Timesteps are referenced by their index in the grid file, starting at 0.

BaseGrid #

Bases: BaseModel, ABC

A base class for grid files. This is not to be used directly, but to be subclassed

n_1_definition class-attribute instance-attribute #

n_1_definition = None

The N-1 cases that are to be computed. If this is provided, this shall overwrite the N-1 cases that are defined in the grid files if the format supports such definition. If this is not provided, the N-1 cases that are defined in the grid files shall be used. If neither is provided, the engine should throw an error.

grid_type instance-attribute #

grid_type

An identifier for the discriminated union, to be set by the subclasses

CGMESGrid #

Bases: BaseGrid

A CGMES grid file does not need to store much additional information

grid_files instance-attribute #

grid_files

A list of paths to grid files. This can include multiple .tp and .ssh files which are to be interpreted as multiple timesteps. If a .tp and a .ssh file have the same filename or the same timestep metadata inside the file, they correspond to the same timestep. Timesteps should be sorted by the timestep information inside the CGMES files.

grid_type class-attribute instance-attribute #

grid_type = 'cgmes'

An identifier for the discriminated union

n_1_definition class-attribute instance-attribute #

n_1_definition = None

The N-1 cases that are to be computed. If this is provided, this shall overwrite the N-1 cases that are defined in the grid files if the format supports such definition. If this is not provided, the N-1 cases that are defined in the grid files shall be used. If neither is provided, the engine should throw an error.

UCTEGrid #

Bases: BaseGrid

A list of UCTE files that are to be loaded into the engine

grid_files instance-attribute #

grid_files

A list of paths to grid files. This can include multiple .ucte files which are to be interpreted as multiple timesteps. Timesteps should be interpreted in the order of this list

grid_type class-attribute instance-attribute #

grid_type = 'ucte'

An identifier for the discriminated union

n_1_definition class-attribute instance-attribute #

n_1_definition = None

The N-1 cases that are to be computed. If this is provided, this shall overwrite the N-1 cases that are defined in the grid files if the format supports such definition. If this is not provided, the N-1 cases that are defined in the grid files shall be used. If neither is provided, the engine should throw an error.

PowsyblGrid #

Bases: BaseGrid

A list of powsybl xiidm files that are to be loaded into the engine

grid_files instance-attribute #

grid_files

A list of xiidm files that represent the timesteps. Timesteps should be interpreted in the order of this list

grid_type class-attribute instance-attribute #

grid_type = 'powsybl'

n_1_definition class-attribute instance-attribute #

n_1_definition = None

The N-1 cases that are to be computed. If this is provided, this shall overwrite the N-1 cases that are defined in the grid files if the format supports such definition. If this is not provided, the N-1 cases that are defined in the grid files shall be used. If neither is provided, the engine should throw an error.

PandapowerGrid #

Bases: BaseGrid

A list of pandapower json files that are to be loaded into the engine

grid_files instance-attribute #

grid_files

A list of pandapower files that represent the timesteps. Timesteps should be interpreted in the order of this list

grid_type class-attribute instance-attribute #

grid_type = 'pandapower'

n_1_definition class-attribute instance-attribute #

n_1_definition = None

The N-1 cases that are to be computed. If this is provided, this shall overwrite the N-1 cases that are defined in the grid files if the format supports such definition. If this is not provided, the N-1 cases that are defined in the grid files shall be used. If neither is provided, the engine should throw an error.

StartCalculationCommand #

Bases: BaseModel

A command to run a list of jobs on the engine.

This can involve multiple N-1 computations with different changes to the base grid, but all jobs must share the same grid file.

loadflow_id instance-attribute #

loadflow_id

A unique identifier for the loadflow run. This is used to identify the result

grid_data class-attribute instance-attribute #

grid_data = Field(discriminator='grid_type')

The string that was returned by load_grid, identifying the grid file that this job collection shall run on

method instance-attribute #

method

The method that is to be used for the loadflow computations. This can be either AC or DC. This must be the same for all jobs in the list

jobs instance-attribute #

jobs

The jobs to be executed

LoadflowEngine #

Bases: Protocol

A protocol for a loadflow engine.

Roughly, an engine shall be able to load grids and execute jobs on them. There is some memory management that the engine needs to perform internally, i.e. it could happen that two users want to use the same engine in parallel. In that case, the engine should swap out grids to disk if memory is full.

run_job #

run_job(job)

Run a job on the engine.

This can involve multiple N-1 computations with different changes to the base grid, identified through multiple jobs in the BatchJob. The engine should return the results of the jobs as in-memory dataframes.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/messages/lf_service/loadflow_commands.py
def run_job(self, job: StartCalculationCommand) -> list[LoadflowResults]:
    """Run a job on the engine.

    This can involve multiple N-1 computations with different changes to the base grid,
    identified through multiple jobs in the BatchJob. The engine should return the results
    of the jobs as in-memory dataframes.

    Parameters
    ----------
    job: StartCalculationCommand
        The command identifying the grid to run on, the loadflow method, and the
        list of jobs to execute.

    Returns
    -------
    list[LoadflowResults]
        The loadflow results for the executed jobs.
    """

ShutdownCommand #

Bases: BaseModel

A command to shut down the preprocessing worker

exit_code class-attribute instance-attribute #

exit_code = 0

The exit code to return

LoadflowServiceCommand #

Bases: BaseModel

A wrapper to aid deserialization

command instance-attribute #

command

The actual command posted

timestamp class-attribute instance-attribute #

timestamp = Field(default_factory=lambda: str(now()))

When the command was sent

uuid class-attribute instance-attribute #

uuid = Field(default_factory=lambda: str(uuid4()))

A unique identifier for this command message, used to avoid duplicates during processing

toop_engine_interfaces.messages.lf_service.loadflow_heartbeat #

Loadflow Heartbeat Commands for the kafka worker.

LoadflowStatusInfo #

Bases: BaseModel

A status info to inform about an ongoing Loadflow solving action.

loadflow_id instance-attribute #

loadflow_id

The id of the loadflow solving job.

runtime instance-attribute #

runtime

The amount of time since the start of the loadflow computation.

message class-attribute instance-attribute #

message = ''

An optional message

LoadflowHeartbeat #

Bases: BaseModel

A message class for heartbeats from the loadflow worker.

When idle, this just sends a hello, and when solving it also conveys the current status of the Loadflow Analysis

idle instance-attribute #

idle

Whether the worker is idle

status_info instance-attribute #

status_info

If not idle, a status update

timestamp class-attribute instance-attribute #

timestamp = Field(default_factory=lambda: str(now()))

When the heartbeat was sent

uuid class-attribute instance-attribute #

uuid = Field(default_factory=lambda: str(uuid4()))

A unique identifier for this heartbeat message, used to avoid duplicates during processing

toop_engine_interfaces.messages.preprocess.preprocess_results #

Contains the result classes for the preprocessing process.

UcteImportResult #

Bases: BaseModel

Statistics and results from an import process of UCTE data

data_folder instance-attribute #

data_folder

The path to the entry point where the timestep data folder structure starts. The folder structure is defined in dc_solver.interfaces.folder_structure. Can be on a temp dir

n_relevant_subs class-attribute instance-attribute #

n_relevant_subs = 0

The number of relevant substations

n_low_impedance_lines class-attribute instance-attribute #

n_low_impedance_lines = 0

The number of low impedance lines that have been converted to a switch

n_branch_across_switch class-attribute instance-attribute #

n_branch_across_switch = 0

The number of branches across a switch that have been removed

n_line_for_nminus1 class-attribute instance-attribute #

n_line_for_nminus1 = 0

The number of lines in the N-1 definition

n_line_for_reward class-attribute instance-attribute #

n_line_for_reward = 0

The number of lines that are observed

n_line_disconnectable class-attribute instance-attribute #

n_line_disconnectable = 0

The number of lines that are disconnectable

n_trafo_for_nminus1 class-attribute instance-attribute #

n_trafo_for_nminus1 = 0

The number of trafos in the N-1 definition

n_trafo_for_reward class-attribute instance-attribute #

n_trafo_for_reward = 0

The number of trafos that are observed

n_trafo_disconnectable class-attribute instance-attribute #

n_trafo_disconnectable = 0

The number of trafos that are disconnectable

n_tie_line_for_reward class-attribute instance-attribute #

n_tie_line_for_reward = 0

The number of tie lines that are observed

n_tie_line_for_nminus1 class-attribute instance-attribute #

n_tie_line_for_nminus1 = 0

The number of tie lines in the N-1 definition

n_tie_line_disconnectable class-attribute instance-attribute #

n_tie_line_disconnectable = 0

The number of tie lines that are disconnectable

n_dangling_line_for_nminus1 class-attribute instance-attribute #

n_dangling_line_for_nminus1 = 0

The number of dangling lines in the N-1 definition

n_generator_for_nminus1 class-attribute instance-attribute #

n_generator_for_nminus1 = 0

The number of generators in the N-1 definition

n_load_for_nminus1 class-attribute instance-attribute #

n_load_for_nminus1 = 0

The number of loads in the N-1 definition

n_switch_for_nminus1 class-attribute instance-attribute #

n_switch_for_nminus1 = 0

The number of switches in the N-1 definition

n_switch_for_reward class-attribute instance-attribute #

n_switch_for_reward = 0

The number of switches that are observed

n_white_list class-attribute instance-attribute #

n_white_list = 0

The number of elements in the whitelist in total

n_white_list_applied class-attribute instance-attribute #

n_white_list_applied = 0

The number of elements in the whitelist that were successfully matched and applied

n_black_list class-attribute instance-attribute #

n_black_list = 0

The number of elements in the blacklist in total

n_black_list_applied class-attribute instance-attribute #

n_black_list_applied = 0

The number of elements in the blacklist that were successfully matched and applied

grid_type class-attribute instance-attribute #

grid_type = 'ucte'

The discriminator for the ImportResult Union

StaticInformationStats #

Bases: BaseModel

Stats about the static information class

time class-attribute instance-attribute #

time = None

The timestep that was optimized, if given

fp_dtype class-attribute instance-attribute #

fp_dtype = ''

A string representation of the floating point type used in the static informations, e.g. 'float32' or 'float64'.

has_double_limits class-attribute instance-attribute #

has_double_limits = False

Whether the static information has max_mw_flow_limited set or not

n_branches class-attribute instance-attribute #

n_branches = 0

The number of branches in the PTDF matrix

n_nodes class-attribute instance-attribute #

n_nodes = 0

The number of nodes in the PTDF matrix

n_branch_outages class-attribute instance-attribute #

n_branch_outages = 0

How many branch outages are part of the N-1 computation

n_multi_outages class-attribute instance-attribute #

n_multi_outages = 0

How many multi-outages are part of the N-1 computation

n_injection_outages class-attribute instance-attribute #

n_injection_outages = 0

How many injection outages are part of the N-1 computation

n_busbar_outages class-attribute instance-attribute #

n_busbar_outages = 0

How many busbar outages are part of the N-1 computation

n_nminus1_cases class-attribute instance-attribute #

n_nminus1_cases = 0

How many N-1 cases are there in total

n_controllable_psts class-attribute instance-attribute #

n_controllable_psts = 0

How many controllable phase shifting transformers are in the grid

n_monitored_branches class-attribute instance-attribute #

n_monitored_branches = 0

How many branches are monitored

n_timesteps class-attribute instance-attribute #

n_timesteps = 0

How many timesteps are optimized at the same time

n_relevant_subs class-attribute instance-attribute #

n_relevant_subs = 0

How many relevant substations are in the grid

n_disc_branches class-attribute instance-attribute #

n_disc_branches = 0

How many disconnectable branches are in the definition

overload_energy_n0 class-attribute instance-attribute #

overload_energy_n0 = 0.0

What is the N-0 overload energy of the unsplit configuration

overload_energy_n1 class-attribute instance-attribute #

overload_energy_n1 = 0.0

What is the N-1 overload energy of the unsplit configuration

n_actions class-attribute instance-attribute #

n_actions = 0

How many actions have been precomputed in the action set. This is the size of the branch action set, note that combinations of actions within that set are possible (product set wise) if multiple substations are split

max_station_branch_degree class-attribute instance-attribute #

max_station_branch_degree = 0

The maximum number of branches connected to any station in the grid

max_station_injection_degree class-attribute instance-attribute #

max_station_injection_degree = 0

The maximum number of injections connected to any station in the grid

mean_station_branch_degree class-attribute instance-attribute #

mean_station_branch_degree = 0.0

The average number of branches connected to any station in the grid

mean_station_injection_degree class-attribute instance-attribute #

mean_station_injection_degree = 0.0

The average number of injections connected to any station in the grid

reassignable_branch_assets class-attribute instance-attribute #

reassignable_branch_assets = 0

The total number of reassignable branch assets in the grid, i.e. how many branches are connected to any of the stations

reassignable_injection_assets class-attribute instance-attribute #

reassignable_injection_assets = 0

The total number of reassignable injection assets in the grid, i.e. how many injections are connected to any of the stations

max_reassignment_distance class-attribute instance-attribute #

max_reassignment_distance = 0

The maximum reassignment distance associated with any action

PowerFactoryImportResult #

Bases: BaseModel

Statistics and results from an import process of PowerFactory data, TODO fill

grid_type class-attribute instance-attribute #

grid_type = 'power_factory'

The discriminator for the ImportResult Union

PreprocessingSuccessResult #

Bases: BaseModel

Results of a preprocessing run, mainly including the static_information and network_data files.

data_folder instance-attribute #

data_folder

The path to the entry point where the timestep data folder structure starts. The folder structure is defined in dc_solver.interfaces.folder_structure. Can be on a temp dir

initial_loadflow instance-attribute #

initial_loadflow

The initial AC loadflow results, i.e. the N-1 analysis without any actions applied to the grid.

initial_metrics instance-attribute #

initial_metrics

The initial metrics computed for the loadflow results

static_information_stats instance-attribute #

static_information_stats

Statistics about the static information file that was produced

importer_results class-attribute instance-attribute #

importer_results = Field(discriminator='grid_type')

The results of the importer process

result_type class-attribute instance-attribute #

result_type = 'preprocessing_success'

The discriminator for the Result Union

PreprocessingStartedResult #

Bases: BaseModel

A message that is sent when the preprocessing process has started

result_type class-attribute instance-attribute #

result_type = 'preprocessing_started'

The discriminator for the Result Union

ErrorResult #

Bases: BaseModel

A message that is sent if an error occurred

error instance-attribute #

error

The error message

result_type class-attribute instance-attribute #

result_type = 'error'

The discriminator for the Result Union

Result #

Bases: BaseModel

A generic class for result, holding either a successful or an unsuccessful result

preprocess_id instance-attribute #

preprocess_id

The preprocess_id that was sent in the preprocess_command, used to identify the result

instance_id class-attribute instance-attribute #

instance_id = ''

The instance id of the importer worker that created this result

runtime instance-attribute #

runtime

The runtime in seconds that the preprocessing took until the result

result class-attribute instance-attribute #

result = Field(discriminator='result_type')

The actual result data in a discriminated union

uuid class-attribute instance-attribute #

uuid = Field(default_factory=lambda: str(uuid4()))

A unique identifier for this result message, used to avoid duplicates during processing

timestamp class-attribute instance-attribute #

timestamp = Field(default_factory=lambda: str(now()))

When the result was sent

toop_engine_interfaces.messages.preprocess.preprocess_commands #

Defines the commands that can be sent to a preprocessing worker.

UCTERegionType module-attribute #

UCTERegionType = Literal[
    "A",
    "B",
    "C",
    "D",
    "D1",
    "D2",
    "D4",
    "D6",
    "D7",
    "D8",
    "E",
    "F",
    "G",
    "H",
    "I",
    "J",
    "K",
    "L",
    "M",
    "N",
    "O",
    "P",
    "Q",
    "R",
    "S",
    "T",
    "U",
    "V",
    "W",
    "X",
    "Y",
    "Z",
    "0",
    "2",
    "_",
]

CGMESRegionType module-attribute #

CGMESRegionType = Literal[
    "AL",
    "AD",
    "AM",
    "AT",
    "AZ",
    "BY",
    "BE",
    "BA",
    "BG",
    "HR",
    "CY",
    "CZ",
    "DK",
    "EE",
    "FI",
    "FR",
    "GE",
    "DE",
    "GR",
    "HU",
    "IS",
    "IE",
    "IT",
    "LV",
    "LI",
    "LT",
    "LU",
    "MT",
    "MD",
    "MC",
    "ME",
    "NL",
    "MK",
    "NO",
    "PL",
    "PT",
    "RO",
    "RU",
    "SM",
    "RS",
    "SK",
    "SI",
    "ES",
    "SE",
    "CH",
    "TR",
    "UA",
    "GB",
    "VA",
]

AllCountriesRegionType module-attribute #

AllCountriesRegionType = Literal['']

RegionType module-attribute #

GridModelType module-attribute #

GridModelType = Literal['ucte', 'cgmes']

LimitAdjustmentParameters #

Bases: BaseModel

Parameters for the adjustment of limits of special branches.

The new operational limits will be created like this: 1) Compute AC Loadflow 2) new_limit = current_flow * n_0_factor (or n_1_factor) 3) If new_limit < min_increase (=old_limit * n_0_min_increase) new_limit = min_increase 4) If new_limit > old_limit new_limit = old_limit

n_0_factor class-attribute instance-attribute #

n_0_factor = 1.2

The factor for the N-0 current limit. Default is 1.2

n_1_factor class-attribute instance-attribute #

n_1_factor = 1.4

The factor for the N-1 current limit. Default is 1.4

n_0_min_increase class-attribute instance-attribute #

n_0_min_increase = 0.05

The minimal allowed load increase in percent for the n-0 case. This value multiplied with the current limit gives a lower border for the new limit. This makes sure that lines that currently are barely loaded can be used at all. Default is 5%.

n_1_min_increase class-attribute instance-attribute #

n_1_min_increase = 0.05

The minimal allowed load increase in percent for the n-1 case. This value multiplied with the current limit gives a lower border for the new limit. This makes sure that lines that currently are barely loaded can be used at all. Default is 5%.

get_parameters_for_case #

get_parameters_for_case(case)

Get the factors for the specific case

PARAMETER DESCRIPTION
case

Which case should be returned

TYPE: Literal[n0, n1]

RETURNS DESCRIPTION
tuple[PositiveFloat, PositiveFloat]

The factor and the minimal increase

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/messages/preprocess/preprocess_commands.py
def get_parameters_for_case(self, case: Literal["n0", "n1"]) -> tuple[PositiveFloat, PositiveFloat]:
    """Return the limit-adjustment parameters for the requested case.

    Parameters
    ----------
    case: Literal["n0", "n1"]
        Which case should be returned

    Returns
    -------
    tuple[PositiveFloat, PositiveFloat]
        The factor and the minimal increase
    """
    # Reject anything outside the two supported cases up front.
    if case not in ("n0", "n1"):
        raise ValueError(f"Case {case} not defined")
    return (
        (self.n_0_factor, self.n_0_min_increase)
        if case == "n0"
        else (self.n_1_factor, self.n_1_min_increase)
    )

AreaSettings #

Bases: BaseModel

Setting related to the areas that are imported

control_area instance-attribute #

control_area

The area in which switching can take place. Substations from this area will automatically become relevant substations (switchable) except they are below the cutoff voltage. Also lines in this area will become disconnectable.

view_area instance-attribute #

view_area

The areas in which branches shall be part of the overload computation, i.e. for which regions shall line flows be computed.

nminus1_area instance-attribute #

nminus1_area

The areas where elements shall be part of the N-1 computation, i.e. which elements to fail.

cutoff_voltage class-attribute instance-attribute #

cutoff_voltage = 220

The cutoff voltage under which to ignore equipment. Equipment that doesn't have at least one end equal or above this nominal voltage will not be part of the reward/nminus1 computation

dso_trafo_factors class-attribute instance-attribute #

dso_trafo_factors = None

If given, the N-0 and N-1 flows across the dso trafos in the specified region will be limited to the current N-0 flows in the unsplit configuration. For each case (n0 or n1) a new operational limit with the name "border_limit_n0"/"border_limit_n1" is added.

dso_trafo_weight class-attribute instance-attribute #

dso_trafo_weight = 1.0

A weight that is used for trafos that leave the n-1 area, to underlying DSOs

border_line_factors class-attribute instance-attribute #

border_line_factors = None

If given, the N-0 and N-1 flows across the border lines leaving or entering the specified region will be limited to the current N-0 flows in the unsplit configuration. For each case (n0 or n1) a new operational limit with the name "border_limit_n0"/"border_limit_n1" is added.

border_line_weight class-attribute instance-attribute #

border_line_weight = 1.0

A weight that is used for lines that leave the n-1 area, to neighbouring TSOs

RelevantStationRules #

Bases: BaseModel

Rules to determine whether a substation is relevant or not.

min_busbars class-attribute instance-attribute #

min_busbars = 2

The minimum number of busbars a substation must have to be considered relevant.

min_connected_branches class-attribute instance-attribute #

min_connected_branches = 4

The minimum number of connected branches a substation must have to be considered relevant. This only counts branches (lines, transformers, tie-lines), not injections (generators, loads, shunts, etc.).

min_connected_elements class-attribute instance-attribute #

min_connected_elements = 4

The minimum number of connected elements a substation must have to be considered relevant. This includes branches and injections (generators, loads, shunts, etc.).

BaseImporterParameters #

Bases: BaseModel

Parameters that are required to import any data format.

area_settings instance-attribute #

area_settings

Which areas of the grid are to be imported and how to handle boundaries

data_folder instance-attribute #

data_folder

The path to the entry point where the timestep data folder structure starts.

The folder structure is defined in interfaces.folder_structure. This folder is relative to the processed_grid_folder that is configured in the backend/importer. A typical default would be grid_model_file.stem

grid_model_file instance-attribute #

grid_model_file

The path to the input grid model file.

This file should contain the grid model in the format defined by the data_type. For instance a .uct for UCTE data or a .zip for CGMES data.

data_type instance-attribute #

data_type

The type of data that is being imported.

This will determine the importer that is used to load the data.

white_list_file class-attribute instance-attribute #

white_list_file = None

The path to the white lists if present

black_list_file class-attribute instance-attribute #

black_list_file = None

The path to the black lists if present

ignore_list_file class-attribute instance-attribute #

ignore_list_file = None

The path to the ignore lists if present

A csv file with the following columns: grid_model_id, reason

The implementation is expected to ignore all elements that are in the ignore list.

select_by_voltage_level_id_list class-attribute instance-attribute #

select_by_voltage_level_id_list = None

If given, only the voltage levels in this list will be imported. Note: not all voltage levels in this list might be considered relevant after preprocessing. This can happen if the requirements for relevant substations are not met. E.g. minimum number of busbars, connected branches or missing busbar couplers.

ingress_id class-attribute instance-attribute #

ingress_id = None

An optional id that is used to identify the source of the data. This can be used to track where the data came from, e.g. if it was imported from a specific database or a specific user.

contingency_list_file class-attribute instance-attribute #

contingency_list_file = None

The path to the contingency lists if present. For the expected format see: importer/contingency_from_power_factory/PF_data_class.py

schema_format class-attribute instance-attribute #

schema_format = None

The schema format of the contingency list file if present. This can be either "ContingencyImportSchemaPowerFactory" or "ContingencyImportSchema". found in: - importer/contingency_from_power_factory/PF_data_class.py - importer/pypowsybl_import/contingency_from_file/contingency_file_models.py

relevant_station_rules class-attribute instance-attribute #

relevant_station_rules = RelevantStationRules()

Rules to determine whether a substation is relevant or not.

UcteImporterParameters #

Bases: BaseImporterParameters

Parameters that are required to import the data from a UCTE file.

This will utilize powsybl and the powsybl backend to the loadflow solver

area_settings class-attribute instance-attribute #

area_settings = AreaSettings(
    control_area=["D8"],
    view_area=["D2", "D4", "D7", "D8"],
    nminus1_area=["D2", "D4", "D7", "D8"],
)

By default the D8 is controllable and the german grid is viewable

grid_model_file instance-attribute #

grid_model_file

The path to the UCTE file to load. Note that only a single timestep, i.e. only a single UCTE file will be loaded in one import/preprocessing run. For multiple timesteps, the preprocessing is triggered multiple times.

data_type class-attribute instance-attribute #

data_type = 'ucte'

A constant field to indicate that this is a UCTE importer

data_folder instance-attribute #

data_folder

The path to the entry point where the timestep data folder structure starts.

The folder structure is defined in interfaces.folder_structure. This folder is relative to the processed_grid_folder that is configured in the backend/importer. A typical default would be grid_model_file.stem

white_list_file class-attribute instance-attribute #

white_list_file = None

The path to the white lists if present

black_list_file class-attribute instance-attribute #

black_list_file = None

The path to the black lists if present

ignore_list_file class-attribute instance-attribute #

ignore_list_file = None

The path to the ignore lists if present

A csv file with the following columns: grid_model_id, reason

The implementation is expected to ignore all elements that are in the ignore list.

select_by_voltage_level_id_list class-attribute instance-attribute #

select_by_voltage_level_id_list = None

If given, only the voltage levels in this list will be imported. Note: not all voltage levels in this list might be considered relevant after preprocessing. This can happen if the requirements for relevant substations are not met. E.g. minimum number of busbars, connected branches or missing busbar couplers.

ingress_id class-attribute instance-attribute #

ingress_id = None

An optional id that is used to identify the source of the data. This can be used to track where the data came from, e.g. if it was imported from a specific database or a specific user.

contingency_list_file class-attribute instance-attribute #

contingency_list_file = None

The path to the contingency lists if present. For the expected format see: importer/contingency_from_power_factory/PF_data_class.py

schema_format class-attribute instance-attribute #

schema_format = None

The schema format of the contingency list file if present. This can be either "ContingencyImportSchemaPowerFactory" or "ContingencyImportSchema". found in: - importer/contingency_from_power_factory/PF_data_class.py - importer/pypowsybl_import/contingency_from_file/contingency_file_models.py

relevant_station_rules class-attribute instance-attribute #

relevant_station_rules = RelevantStationRules()

Rules to determine whether a substation is relevant or not.

CgmesImporterParameters #

Bases: BaseImporterParameters

Parameters to start an import data from a CGMES file.

This will utilize powsybl and the powsybl backend to the loadflow solver.

area_settings class-attribute instance-attribute #

area_settings = AreaSettings(
    control_area=["BE"],
    view_area=["BE", "LU", "D4", "D2", "NL", "FR"],
    nminus1_area=["BE"],
    cutoff_voltage=220,
)

The area settings for the CGMES importer

grid_model_file instance-attribute #

grid_model_file

The path to the CGMES .zip file to load.

Note that only a single timestep, i.e. only a single CGMES .zip file will be loaded in one import/preprocessing run. For multiple timesteps, the preprocessing is triggered multiple times. Note: the .zip file must contain all xml files in the same root folder, i.e. the following files: - EQ.xml - SSH.xml - SV.xml - TP.xml - EQBD.xml - TPBD.xml

data_type class-attribute instance-attribute #

data_type = 'cgmes'

A constant field to indicate that this is a CGMES importer

data_folder instance-attribute #

data_folder

The path to the entry point where the timestep data folder structure starts.

The folder structure is defined in interfaces.folder_structure. This folder is relative to the processed_grid_folder that is configured in the backend/importer. A typical default would be grid_model_file.stem

white_list_file class-attribute instance-attribute #

white_list_file = None

The path to the white lists if present

black_list_file class-attribute instance-attribute #

black_list_file = None

The path to the black lists if present

ignore_list_file class-attribute instance-attribute #

ignore_list_file = None

The path to the ignore lists if present

A csv file with the following columns: grid_model_id, reason

The implementation is expected to ignore all elements that are in the ignore list.

select_by_voltage_level_id_list class-attribute instance-attribute #

select_by_voltage_level_id_list = None

If given, only the voltage levels in this list will be imported. Note: not all voltage levels in this list might be considered relevant after preprocessing. This can happen if the requirements for relevant substations are not met. E.g. minimum number of busbars, connected branches or missing busbar couplers.

ingress_id class-attribute instance-attribute #

ingress_id = None

An optional id that is used to identify the source of the data. This can be used to track where the data came from, e.g. if it was imported from a specific database or a specific user.

contingency_list_file class-attribute instance-attribute #

contingency_list_file = None

The path to the contingency lists if present. For the expected format see: importer/contingency_from_power_factory/PF_data_class.py

schema_format class-attribute instance-attribute #

schema_format = None

The schema format of the contingency list file if present. This can be either "ContingencyImportSchemaPowerFactory" or "ContingencyImportSchema". found in: - importer/contingency_from_power_factory/PF_data_class.py - importer/pypowsybl_import/contingency_from_file/contingency_file_models.py

relevant_station_rules class-attribute instance-attribute #

relevant_station_rules = RelevantStationRules()

Rules to determine whether a substation is relevant or not.

ReassignmentLimits #

Bases: BaseModel

Reassignment limits for electrical reconfiguration at substations.

max_reassignments_per_sub class-attribute instance-attribute #

max_reassignments_per_sub = 1000

The maximum number of reassignments to perform during the electrical reconfiguration. Gets overridden by station_specific_limits if a station id is given.

station_specific_limits class-attribute instance-attribute #

station_specific_limits = Field(default_factory=dict)

Specific reassignment limits per station to override the global reassignment limit. Expects a grid model id as key and the maximum number of reassignments as value. Note: the grid model id must match the id in the relevant substation list after import.

PreprocessParameters #

Bases: BaseModel

Parameters for the preprocessing procedure which is independent of the data source

filter_disconnectable_branches_processes class-attribute instance-attribute #

filter_disconnectable_branches_processes = 1

When checking for disconnectable branches, multiple worker processes can be used as it is a costly operation.

action_set_filter_bridge_lookup class-attribute instance-attribute #

action_set_filter_bridge_lookup = True

Whether to filter the action set using bridge lookups. This will remove all assignments that have less than two non-bridges on every side

action_set_filter_bsdf_lodf class-attribute instance-attribute #

action_set_filter_bsdf_lodf = True

Whether to filter the action set using a consecutive BSDF/LODF application. This will filter out all actions that are also filtered by bridge lookup and additionally all actions that split the grid under N-1 branch outages, i.e. all assignments that created a new bridge in the graph. This is a relatively costly process to run, only set to true if you can afford the extra preprocessing time.

action_set_filter_bsdf_lodf_batch_size class-attribute instance-attribute #

action_set_filter_bsdf_lodf_batch_size = 8

If filtering with bsdf/lodf - which batch size to use. Larger will use more memory but be faster.

action_set_clip class-attribute instance-attribute #

action_set_clip = 2 ** 23

After which size to randomly subselect actions at a substation. If a substation has a lot of branches, the action space will explode exponentially and a safe-guard is to clip after a certain number of actions.

asset_topo_close_couplers class-attribute instance-attribute #

asset_topo_close_couplers = False

Whether to close open couplers in all stations in the asset topology. This might accidentally cancel a maintenance

separation_set_clip_hamming_distance class-attribute instance-attribute #

separation_set_clip_hamming_distance = 0

If a large configuration table comes out of a substation, the table size can be reduced by removing configurations that are close to each other. This parameter sets the definition of close in terms of hamming distance, by default 0 (no reduction).

separation_set_clip_at_size class-attribute instance-attribute #

separation_set_clip_at_size = 100

By what size a table is considered large. If the table is larger than this size, the clip_hamming_distance will be used to reduce the table size, by default 100. If a table is smaller, no reduction will be applied.

realise_station_busbar_choice_heuristic class-attribute instance-attribute #

realise_station_busbar_choice_heuristic = (
    "least_connected_busbar"
)

The heuristic to use when there are multiple physical busbars available for an asset. The options are: - "first": Use the first busbar in the list of busbars (fastest preprocessing) - "least_connected_busbar": Use the busbar with the least number of connections to other assets (best results)

The "least_connected_busbar" heuristic is the default and is recommended for most cases, trying to spread the assets evenly across the busbars in a station.

electrical_reassignment_limits class-attribute instance-attribute #

electrical_reassignment_limits = None

If given, limits for the electrical reassignment at substations.

physical_reassignment_limits class-attribute instance-attribute #

physical_reassignment_limits = None

If given, limits for the physical reassignment at substations.

ac_dc_interpolation class-attribute instance-attribute #

ac_dc_interpolation = 0.0

Whether to use the DC loadflow as the base loadflow (0) or the AC loadflow (1). Can also be anything in between.

enable_n_2 class-attribute instance-attribute #

enable_n_2 = False

Whether to enable N-2 analysis

n_2_more_splits_penalty class-attribute instance-attribute #

n_2_more_splits_penalty = 2000.0

How to penalize additional splits in N-2 that were not there in the unsplit grid. Will be added to the overload energy penalty.

enable_bb_outage class-attribute instance-attribute #

enable_bb_outage = False

Whether to enable busbar outage analysis

bb_outage_as_nminus1 class-attribute instance-attribute #

bb_outage_as_nminus1 = True

Whether to treat busbar outages as N-1 outages. If set to False, the busbar outage will be treated similar to N-2 outages. This will be used to compute the busbar outage penalty.

bb_outage_more_splits_penalty class-attribute instance-attribute #

bb_outage_more_splits_penalty = 50.0

How to penalize additional splits in busbar outages that were not there in the unsplit grid. Will be added to the overload energy penalty.

enable_nodal_inj_optim class-attribute instance-attribute #

enable_nodal_inj_optim = False

Whether to enable nodal injection optimization (including PST optimization)

precision_percent class-attribute instance-attribute #

precision_percent = 0.0

The precision percent for the nodal injection optimization.

clip_bb_outage_penalty class-attribute instance-attribute #

clip_bb_outage_penalty = False

Whether to clip the lower bound of the busbar outage penalty to 0. We set this parameter to False, if we want the optimiser to solve busbar outage problems in the grid. However, when we just want to ensure that the busbar outage problems are not exacerbated due to the optimiser, we set this to True.

double_limit_n0 class-attribute instance-attribute #

double_limit_n0 = 0.9

If passed, then double limits will be computed for the N-0 flows. Lines that are below double_limit_n0 relative load in the unsplit configuration will have their capacity multiplied by double_limit_n0 to prevent loading them up to their maximum capacity.

double_limit_n1 class-attribute instance-attribute #

double_limit_n1 = 0.9

If passed, then double limits will be computed for the N-1 flows. Lines that are below double_limit_n1 relative load in the unsplit configuration will have their capacities multiplied by double_limit_n1 to prevent loading them up to their maximum capacity.

initial_loadflow_processes class-attribute instance-attribute #

initial_loadflow_processes = 8

How many processes to use to compute the initial AC loadflow

StartPreprocessingCommand #

Bases: BaseModel

A command to launch a preprocessing run of a timestep upon reception.

importer_parameters class-attribute instance-attribute #

importer_parameters = Field(discriminator='data_type')

The parameters to the importer, depending which input source was chosen

preprocess_parameters class-attribute instance-attribute #

preprocess_parameters = PreprocessParameters()

Parameters required for preprocessing independent of the data source

preprocess_id instance-attribute #

preprocess_id

The id of the preprocessing run, should be included in all responses to identify where the data came from

command_type class-attribute instance-attribute #

command_type = 'start_preprocessing'

ShutdownCommand #

Bases: BaseModel

A command to shut down the preprocessing worker

exit_code class-attribute instance-attribute #

exit_code = 0

The exit code to return

command_type class-attribute instance-attribute #

command_type = 'shutdown'

Command #

Bases: BaseModel

A wrapper to aid deserialization

command class-attribute instance-attribute #

command = Field(discriminator='command_type')

The actual command posted

timestamp class-attribute instance-attribute #

timestamp = Field(default_factory=lambda: str(now()))

When the command was sent

uuid class-attribute instance-attribute #

uuid = Field(default_factory=lambda: str(uuid4()))

A unique identifier for this command message, used to avoid duplicates during processing

toop_engine_interfaces.messages.preprocess.preprocess_heartbeat #

Contains the message classes for a preprocessing worker's heartbeat messages

logger module-attribute #

logger = Logger(__name__)

ConvertToJaxStage module-attribute #

ConvertToJaxStage = Literal[
    "convert_to_jax_started",
    "convert_tot_stat",
    "convert_relevant_inj",
    "convert_masks",
    "switching_distance_info",
    "pad_out_branch_actions",
    "convert_rel_bb_outage_data",
    "create_static_information",
    "filter_branch_actions",
    "unsplit_n2_analysis",
    "bb_outage_baseline_analysis",
    "convert_to_jax_done",
]

NumpyPreprocessStage module-attribute #

NumpyPreprocessStage = Literal[
    "preprocess_started",
    "extract_network_data_from_interface",
    "filter_relevant_nodes",
    "assert_network_data",
    "compute_ptdf_if_not_given",
    "compute_psdf_if_not_given",
    "add_nodal_injections_to_network_data",
    "combine_phaseshift_and_injection",
    "compute_bridging_branches",
    "exclude_bridges_from_outage_masks",
    "reduce_branch_dimension",
    "reduce_node_dimension",
    "filter_disconnectable_branches_nminus2",
    "compute_branch_topology_info",
    "compute_electrical_actions",
    "enumerate_station_realizations",
    "remove_relevant_subs_without_actions",
    "simplify_asset_topology",
    "compute_separation_set",
    "convert_multi_outages",
    "filter_inactive_injections",
    "compute_injection_topology_info",
    "process_injection_outages",
    "add_missing_asset_topo_info",
    "add_bus_b_columns_to_ptdf",
    "enumerate_injection_actions",
    "preprocess_bb_outage",
    "preprocess_done",
]

LoadGridStage module-attribute #

LoadGridStage = Literal[
    "load_grid_into_loadflow_solver_backend",
    "compute_base_loadflows",
    "save_artifacts",
]

InitialLoadflowStage module-attribute #

InitialLoadflowStage = Literal[
    "prepare_contingency_analysis",
    "run_contingency_analysis",
]

ImporterStage module-attribute #

ImporterStage = Literal[
    "start",
    "load_ucte",
    "get_topology_model",
    "modify_low_impedance_lines",
    "modify_branches_over_switches",
    "apply_cb_list",
    "cross_border_current",
    "get_masks",
    "end",
]

PreprocessStage module-attribute #

PreprocessStatusInfo #

Bases: BaseModel

A status info to inform about an ongoing preprocess action.

preprocess_id instance-attribute #

preprocess_id

The id of the preprocess job.

runtime instance-attribute #

runtime

The amount of time since the start of the optimization.

stage instance-attribute #

stage

The stage in which the preprocessing job currently is.

message instance-attribute #

message

An optional message

PreprocessHeartbeat #

Bases: BaseModel

A message class for heartbeats from the preprocessing worker.

When idle, this just sends a hello, and when preprocessing it also conveys a status update at which stage the preprocessing is so it can be tracked in the frontend.

idle instance-attribute #

idle

Whether the worker is idle

status_info class-attribute instance-attribute #

status_info = None

If not idle, a status update

instance_id class-attribute instance-attribute #

instance_id = ''

The ID of the worker instance that sent this heartbeat.

timestamp class-attribute instance-attribute #

timestamp = Field(default_factory=lambda: str(now()))

When the heartbeat was sent

uuid class-attribute instance-attribute #

uuid = Field(default_factory=lambda: str(uuid4()))

A unique identifier for this heartbeat message, used to avoid duplicates during processing

empty_status_update_fn #

empty_status_update_fn(stage, message)

Log an empty status update to logging.

Use this function when no status_update_fn is provided.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/messages/preprocess/preprocess_heartbeat.py
def empty_status_update_fn(stage: PreprocessStage, message: Optional[str]) -> None:
    """Log an empty status update to logging.

    Use this function when no status_update_fn is provided.

    Parameters
    ----------
    stage : PreprocessStage
        The preprocessing stage to report.
    message : Optional[str]
        An optional extra message appended to the log line; omitted when None.
    """
    # Guard-clause form: log stage together with the message when one is given,
    # otherwise log the stage alone.
    if message is not None:
        logger.info(f"Preprocessing stage {stage}, {message}")
        return
    logger.info(f"Preprocessing stage {stage}")

Loadflow Service#

toop_engine_interfaces.loadflow_results #

Defines interfaces for loadflow results.

The overall process is that a job is called on a loadflow engine for a grid. The grid holds some information that is referenced in the results: - an N-1 definition, which can include multi-contingencys. An N-1 case is uniquely identified by a string descriptor but can include multiple failing elements. The string identifier of the N-1 case should be delivered upon loading. - a number of timesteps, which are uniquely identified by an integer index. - branches which are uniquely identified by a string descriptor and have either two or three sides. - nodes which are uniquely identified by a string descriptor and have a type (PV, PQ, REF) - regulating elements which are uniquely identified by a string descriptor and have a type (generator, regulating transformer, SVC, ...)

LoadflowResultTable module-attribute #

LoadflowResultTable = Union[
    DataFrame[NodeResultSchema],
    DataFrame[BranchResultSchema],
    DataFrame[VADiffResultSchema],
    DataFrame[RegulatingElementResultSchema],
    DataFrame[ConvergedSchema],
]

BranchSide #

Bases: Enum

The side of a branch.

ONE class-attribute instance-attribute #

ONE = 1

The following side for the types of branches: - line: from side - 2 winding trafo: high voltage side - 3 winding trafo: high voltage side - other: from side

TWO class-attribute instance-attribute #

TWO = 2

The following side for the types of branches: - line: to side - 2 winding trafo: low voltage side - 3 winding trafo: medium voltage side - other: to side

THREE class-attribute instance-attribute #

THREE = 3

Only valid for 3 winding transformers, representing the low voltage side.

RegulatingElementType #

Bases: Enum

A list of known regulating elements, TODO expand

GENERATOR_Q class-attribute instance-attribute #

GENERATOR_Q = 'GENERATOR_Q'

A generator that is used to control the reactive power output.

SLACK_P class-attribute instance-attribute #

SLACK_P = 'SLACK_P'

The active power output of the slack node.

SLACK_Q class-attribute instance-attribute #

SLACK_Q = 'SLACK_Q'

The reactive power output of the slack node.

REGULATING_TRANSFORMER_TAP class-attribute instance-attribute #

REGULATING_TRANSFORMER_TAP = 'REGULATING_TRANSFORMER_TAP'

A regulating transformer that is used to control the tap position.

SVC_Q class-attribute instance-attribute #

SVC_Q = 'SVC_Q'

A static var compensator that is used to control the reactive power output.

HVDC_CONVERTER_Q class-attribute instance-attribute #

HVDC_CONVERTER_Q = 'HVDC_CONVERTER_Q'

An HVDC converter station.

OTHER class-attribute instance-attribute #

OTHER = 'OTHER'

A placeholder for not yet known regulating elements.

ConvergenceStatus #

Bases: Enum

The convergence status of the loadflow in a single timestep/contingency/component

CONVERGED class-attribute instance-attribute #

CONVERGED = 'CONVERGED'

The loadflow converged

FAILED class-attribute instance-attribute #

FAILED = 'FAILED'

The loadflow failed to start, e.g. because no slack bus was available

MAX_ITERATION_REACHED class-attribute instance-attribute #

MAX_ITERATION_REACHED = 'MAX_ITERATION_REACHED'

The maximum number of iterations was reached, i.e. the loadflow did not converge.

NO_CALCULATION class-attribute instance-attribute #

NO_CALCULATION = 'NO_CALCULATION'

The component was ignored due to other reasons (engine did not support it)

BranchResultSchema #

Bases: DataFrameModel

A schema for the branch results table.

This holds i, p and q values for all monitored branches with a multi-index of timestep, contingency (CO), branch (CB) and side.

If no branches are monitored, this is the empty DataFrame.

TODO Decide if this should be used for injections as well#

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' without GridElements is used, if its added.

element instance-attribute #

element

The branch that these loadflow results correspond to

side class-attribute instance-attribute #

side = Field(isin=[side.value for side in BranchSide])

The side of the branch that these results correspond to

i class-attribute instance-attribute #

i = Field(nullable=True)

The current in the branch in A

This should only be NaN if the branch has no connection to the slack bus.

p class-attribute instance-attribute #

p = Field(nullable=True)

The active power in the branch in MW

This should only be NaN if the branch has no connection to the slack bus.

q class-attribute instance-attribute #

q = Field(nullable=True)

The reactive power in the branch in MVar

This should only be NaN if the branch has no connection to the slack bus.

loading class-attribute instance-attribute #

loading = Field(nullable=True)

The loading of the branch in % of rated current. This always refers to the permanent/default rating of the branch if there are multiple ratings available. If no rating is available for the branch, this should be set to NaN. If the engine does not support the computation of this value, the column can be omitted.

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Branch, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

NodeResultSchema #

Bases: DataFrameModel

A schema for the node results table.

This holds p and q values for all monitored nodes with a multi-index of timestep and contingency. If no nodes are monitored, this is the empty DataFrame.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The node that these loadflow results correspond to

vm class-attribute instance-attribute #

vm = Field(nullable=True)

The voltage magnitude at the node in kV.

In DC, this should be the nominal voltage of the node. This should only be NaN if the node does not have a connection to the slack bus.

vm_loading class-attribute instance-attribute #

vm_loading = Field(nullable=True)

How close the voltage magnitude is to the max/min voltage limits in percent. This is computed as: (vm - v_nominal) / (v_max - v_nominal) for vm > v_nominal and (vm - v_nominal) / (v_nominal - v_min) for vm < v_nominal.

va class-attribute instance-attribute #

va = Field(nullable=True)

The voltage angle at the node in degrees

This should only be NaN if the node does not have a connection to the slack bus.

p class-attribute instance-attribute #

p = Field(nullable=True)

The accumulated absolute active power at the node in MW, obtained by summing the absolute active power of all branches and injections connected to the node.

If the engine does not support the computation of this value, the column can be omitted.

q class-attribute instance-attribute #

q = Field(nullable=True)

The accumulated absolute reactive power at the node in MVar, obtained by summing the absolute reactive power of all branches and injections connected to the node

If the engine does not support the computation of this value, the column can be omitted.

element_name class-attribute instance-attribute #

element_name = Field(default='', nullable=True)

The name of the node, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='', nullable=True)

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

VADiffResultSchema #

Bases: DataFrameModel

A schema for the voltage angle results.

Holds information about the voltage angle difference between busses that could be (re)connected by power switches.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The element over which the voltage angle difference is computed. Can be either an open switch or any switch or branch under N-1. If under N-1, then element and contingency are the same.

va_diff class-attribute instance-attribute #

va_diff = Field(nullable=True)

The voltage angle difference in degrees between the two ends of the element. nan if at least one of the ends has no voltage angle (island, out of service)

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Branch or Switch, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

RegulatingElementResultSchema #

Bases: DataFrameModel

A schema for the regulating elements.

A regulating element can either be a branch (a trafo with regulating tap) or a node (a generator, SVC, ...). If no regulating elements are monitored, this is the empty DataFrame.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The regulating element that these loadflow results correspond to

value instance-attribute #

value

The value of the regulating element. Depending on the type of the regulating element, this can mean different things.

regulating_element_type class-attribute instance-attribute #

regulating_element_type = Field(
    isin=[side.value for side in RegulatingElementType]
)

The type of the regulating element (generator, regulating transformer, SVC, ...).

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Regulating Element, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

ConvergedSchema #

Bases: DataFrameModel

A schema for the converged table. This holds the convergence information for each timestep.

Potentially, multiple islands can exist in the same grid. In this case, the synchronous component needs to be distinguished. A synchronous component is a grid area consisting of all nodes and branches that are connected to the same slack through AC lines (no HVDC). The largest component must always be called 'MAIN' while the names of the other components are arbitrary. Usually only one component is present. If no convergence information is available, this is the empty DataFrame.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

status class-attribute instance-attribute #

status = Field(isin=[side.value for side in ConvergenceStatus])

Whether the loadflow converged at this timestep/contingency.

iteration_count class-attribute instance-attribute #

iteration_count = Field(nullable=True)

The number of iterations required for the loadflow to converge.

warnings class-attribute instance-attribute #

warnings = Field(default='')

An additional string field that carries warnings or error logs for specific timesteps/contingencys/components.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

LoadflowResults #

Bases: BaseModel

A container for the loadflow results for a computation job.

job_id instance-attribute #

job_id

The id of the computation job that created these loadflows

branch_results class-attribute instance-attribute #

branch_results = None

The results for the branches. If no branches are monitored, this is the empty DataFrame. Non converging contingencys/timesteps are to be omitted

node_results class-attribute instance-attribute #

node_results = None

The results for the nodes. If no nodes are monitored, this is the empty DataFrame.

regulating_element_results class-attribute instance-attribute #

regulating_element_results = None

The results for the regulating elements. If no regulating elements are monitored, this is the empty DataFrame.

converged class-attribute instance-attribute #

converged = None

The convergence information for each timestep and contingency. If there were non-converging loadflows for some timesteps/contingencys, these results should be omitted from the other tables.

va_diff_results class-attribute instance-attribute #

va_diff_results = None

The voltage angle difference results for each timestep and contingency. Considers the ends of the outaged branch, as well as all open switches in monitored elements.

warnings class-attribute instance-attribute #

warnings = Field(default_factory=list)

Global warnings that occurred during the computation (e.g. monitored elements/contingencies that were not found)

additional_information class-attribute instance-attribute #

additional_information = Field(default_factory=list)

Additional information that the loadflow solver wants to convey to the user. There is no limitation what can be put in here except that it needs to be json serializable.

__eq__ #

__eq__(lf_result)

Compare two LoadflowResults objects for equality.

Rounds floats to 6 decimal places for comparison. This is necessary because floating point arithmetic can lead to small differences in the results.

Ignores the order of the DataFrames, but checks that the indices are equal.

PARAMETER DESCRIPTION
lf_result

The LoadflowResults object to compare with.

TYPE: LoadflowResults

RETURNS DESCRIPTION
bool

True if the two LoadflowResults objects are equal, False otherwise.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_results.py
def __eq__(self, lf_result: Self) -> bool:
    """Compare two LoadflowResults objects for equality.

    Rounds floats to 6 decimal places for comparison.
    This is necessary because floating point arithmetic can lead to small differences in the results.

    Ignores the order of the DataFrames, but checks that the indices are equal.

    Parameters
    ----------
    lf_result : LoadflowResults
        The LoadflowResults object to compare with.

    Returns
    -------
    bool
        True if the two LoadflowResults objects are equal, False otherwise.
    """
    rounding_accuracy = 6

    if not isinstance(lf_result, LoadflowResults):
        return False

    # Cheap scalar comparisons first, so we can bail out before touching any DataFrame.
    if (
        self.job_id != lf_result.job_id
        or self.warnings != lf_result.warnings
        or self.additional_information != lf_result.additional_information
    ):
        return False

    # Compare each result table pairwise. The attributes default to None, so treat
    # two missing tables as equal and exactly one missing table as unequal instead
    # of raising AttributeError on `.shape`.
    table_names = (
        "branch_results",
        "node_results",
        "regulating_element_results",
        "va_diff_results",
        "converged",
    )
    for table_name in table_names:
        own_table = getattr(self, table_name)
        other_table = getattr(lf_result, table_name)
        if own_table is None and other_table is None:
            continue
        if own_table is None or other_table is None:
            return False
        # Shape first: it is cheap and guarantees equal row/column counts.
        if own_table.shape != other_table.shape:
            return False
        # One-way membership is enough since the lengths are equal.
        if not all(own_table.index.isin(other_table.index)):
            return False
        if not all(own_table.columns.isin(other_table.columns)):
            return False
        # Align rows/columns to the other table's order, then compare rounded values.
        aligned = own_table.loc[
            other_table.index.drop_duplicates(), other_table.columns
        ].round(rounding_accuracy)
        if not aligned.equals(other_table.round(rounding_accuracy)):
            return False
    return True

toop_engine_interfaces.loadflow_results_polars #

Defines performance-improved polars versions of the loadflow results.

The loadflow results here mirror what is defined in loadflow_results.py, but use polars dataframes which are faster.

LoadflowResultTablePolars module-attribute #

LoadflowResultTablePolars = Union[
    LazyFrame[NodeResultSchemaPolars],
    LazyFrame[BranchResultSchemaPolars],
    LazyFrame[VADiffResultSchemaPolars],
    LazyFrame[RegulatingElementResultSchemaPolars],
    LazyFrame[ConvergedSchemaPolars],
]

BranchResultSchemaPolars #

Bases: DataFrameModel, BranchResultSchema

Polars variant of BranchResultSchema.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' without GridElements is used, if it is added.

element instance-attribute #

element

The branch that these loadflow results correspond to

side class-attribute instance-attribute #

side = Field(isin=[side.value for side in BranchSide])

The side of the branch that these results correspond to

i class-attribute instance-attribute #

i = Field(nullable=True)

The current in the branch in A

This should only be NaN if the branch has no connection to the slack bus.

p class-attribute instance-attribute #

p = Field(nullable=True)

The active power in the branch in MW

This should only be NaN if the branch has no connection to the slack bus.

q class-attribute instance-attribute #

q = Field(nullable=True)

The reactive power in the branch in MVar

This should only be NaN if the branch has no connection to the slack bus.

loading class-attribute instance-attribute #

loading = Field(nullable=True)

The loading of the branch in % of rated current. This always refers to the permanent/default rating of the branch if there are multiple ratings available. If no rating is available for the branch, this should be set to NaN. If the engine does not support the computation of this value, the column can be omitted.

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Branch, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

NodeResultSchemaPolars #

Bases: DataFrameModel, NodeResultSchema

Polars variant of NodeResultSchema.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The node that these loadflow results correspond to

vm class-attribute instance-attribute #

vm = Field(nullable=True)

The voltage magnitude at the node in kV.

In DC, this should be the nominal voltage of the node. This should only be NaN if the node does not have a connection to the slack bus.

vm_loading class-attribute instance-attribute #

vm_loading = Field(nullable=True)

How close the voltage magnitude is to the max/min voltage limits in percent. This is computed as: (vm - v_nominal) / (v_max - v_nominal) for vm > v_nominal and (vm - v_nominal) / (v_nominal - v_min) for vm < v_nominal.

va class-attribute instance-attribute #

va = Field(nullable=True)

The voltage angle at the node in degrees

This should only be NaN if the node does not have a connection to the slack bus.

p class-attribute instance-attribute #

p = Field(nullable=True)

The accumulated absolute active power at the node in MW, obtained by summing the absolute active power of all branches and injections connected to the node.

If the engine does not support the computation of this value, the column can be omitted.

q class-attribute instance-attribute #

q = Field(nullable=True)

The accumulated absolute reactive power at the node in MVar, obtained by summing the absolute reactive power of all branches and injections connected to the node

If the engine does not support the computation of this value, the column can be omitted.

element_name class-attribute instance-attribute #

element_name = Field(default='', nullable=True)

The name of the node, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='', nullable=True)

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

VADiffResultSchemaPolars #

Bases: DataFrameModel, VADiffResultSchema

Polars variant of VADiffResultSchema.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The element over which the voltage angle difference is computed. Can be either an open switch or any switch or branch under N-1. If under N-1, then element and contingency are the same.

va_diff class-attribute instance-attribute #

va_diff = Field(nullable=True)

The voltage angle difference in degrees between the two ends of the element. nan if at least one of the ends has no voltage angle (island, out of service)

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Branch or Switch, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

RegulatingElementResultSchemaPolars #

Bases: DataFrameModel, RegulatingElementResultSchema

Polars variant of RegulatingElementResultSchema.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

element instance-attribute #

element

The regulating element that these loadflow results correspond to

value instance-attribute #

value

The value of the regulating element. Depending on the type of the regulating element, this can mean different things.

regulating_element_type class-attribute instance-attribute #

regulating_element_type = Field(
    isin=[side.value for side in RegulatingElementType]
)

The type of the regulating element (generator, regulating transformer, SVC, ...).

element_name class-attribute instance-attribute #

element_name = Field(default='')

The name of the Regulating Element, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

ConvergedSchemaPolars #

Bases: DataFrameModel, ConvergedSchema

Polars variant of ConvergedSchema.

timestep instance-attribute #

timestep

The timestep of this result. This indexes into the timesteps that were loaded

contingency instance-attribute #

contingency

The critical contingency that caused this loadflow. For N-0 results, the special CO 'BASECASE' is used.

status class-attribute instance-attribute #

status = Field(isin=[side.value for side in ConvergenceStatus])

Whether the loadflow converged at this timestep/contingency.

iteration_count class-attribute instance-attribute #

iteration_count = Field(nullable=True)

The number of iterations required for the loadflow to converge.

warnings class-attribute instance-attribute #

warnings = Field(default='')

An additional string field that carries warnings or error logs for specific timesteps/contingencies/components.

contingency_name class-attribute instance-attribute #

contingency_name = Field(default='')

The name of the contingency, if available. This is not used for the loadflow computation, but can be used for display purposes. If no name is available, this should be set to an empty string.

LoadflowResultsPolars #

Bases: BaseModel

A container for the loadflow results for a computation job.

job_id instance-attribute #

job_id

The id of the computation job that created these loadflows

branch_results class-attribute instance-attribute #

branch_results = None

The results for the branches. If no branches are monitored, this is the empty DataFrame. Non-converging contingencies/timesteps are to be omitted

node_results class-attribute instance-attribute #

node_results = None

The results for the nodes. If no nodes are monitored, this is the empty DataFrame.

regulating_element_results class-attribute instance-attribute #

regulating_element_results = None

The results for the regulating elements. If no regulating elements are monitored, this is the empty DataFrame.

converged class-attribute instance-attribute #

converged = None

The convergence information for each timestep and contingency. If there were non-converging loadflows for some timesteps/contingencies, these results should be omitted from the other tables.

va_diff_results class-attribute instance-attribute #

va_diff_results = None

The voltage angle difference results for each timestep and contingency. Considers the ends of the outaged branch, as well as all open switches in monitored elements.

warnings class-attribute instance-attribute #

warnings = Field(default_factory=list)

Global warnings that occurred during the computation (e.g. monitored elements/contingencies that were not found)

additional_information class-attribute instance-attribute #

additional_information = Field(default_factory=list)

Additional information that the loadflow solver wants to convey to the user. There is no limitation what can be put in here except that it needs to be json serializable.

Config #

Pydantic configuration for the LoadflowResultsPolars model.

arbitrary_types_allowed class-attribute instance-attribute #
arbitrary_types_allowed = True

Allow arbitrary types in the model.

__eq__ #

__eq__(lf_result)

Compare two LoadflowResults objects for equality.

Rounds floats to 6 decimal places for comparison. This is necessary because floating point arithmetic can lead to small differences in the results.

Ignores the order of the DataFrames, but checks that the indices are equal.

Note: This function is not very efficient and can take up to half a minute for >10 million rows.

PARAMETER DESCRIPTION
lf_result

The LoadflowResults object to compare with.

TYPE: LoadflowResultsPolars

RETURNS DESCRIPTION
bool

True if the two LoadflowResults objects are equal, False otherwise.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_results_polars.py
def __eq__(self, lf_result: Self) -> bool:
    """Compare two LoadflowResultsPolars objects for equality.

    Uses an absolute tolerance of 1e-6 for float comparison.
    This is necessary because floating point arithmetic can lead to small differences in the results.

    Ignores the order of the DataFrames, but checks that the indices are equal.

    Note: This function is not very efficient and can take up to half a minute for >10 million rows.

    Parameters
    ----------
    lf_result : LoadflowResultsPolars
        The LoadflowResultsPolars object to compare with.

    Returns
    -------
    bool
        True if the two LoadflowResultsPolars objects are equal, False otherwise.
    """
    rounding_accuracy = 1e-6

    if not isinstance(lf_result, LoadflowResultsPolars):
        return False

    # Cheap scalar comparisons first, so we can bail out before comparing frames.
    if (
        self.job_id != lf_result.job_id
        or self.warnings != lf_result.warnings
        or self.additional_information != lf_result.additional_information
    ):
        return False

    kw_args_testing = {
        "check_row_order": False,
        "check_column_order": False,
        "check_dtypes": True,
        "check_exact": False,
        "abs_tol": rounding_accuracy,
    }
    # The table attributes default to None: treat two missing tables as equal and
    # exactly one missing table as unequal instead of letting assert_frame_equal raise.
    table_names = (
        "branch_results",
        "node_results",
        "regulating_element_results",
        "va_diff_results",
        "converged",
    )
    for table_name in table_names:
        own_table = getattr(self, table_name)
        other_table = getattr(lf_result, table_name)
        if own_table is None and other_table is None:
            continue
        if own_table is None or other_table is None:
            return False
        try:
            assert_frame_equal(own_table, other_table, **kw_args_testing)
        except AssertionError:
            return False
    return True

toop_engine_interfaces.loadflow_result_helpers_polars #

Loadflow result helpers for polars LazyFrames or DataFrames.

Holds functions to work with the loadflow results interfaces.

save_loadflow_results_polars #

save_loadflow_results_polars(fs, file_path, loadflows)

Save loadflow results to a file in hdf5 format.

PARAMETER DESCRIPTION
fs

The filesystem to use to save the results. This can be a local filesystem or an object store like S3 or Azure, using the fsspec library. For writing to local disk, you should use the DirFilesystem to inject a base path like this:

from fsspec.implementations.local import DirFileSystem
fs = DirFileSystem(base_path="/path/to/base")
Similarly, buckets can be used with the appropriate fsspec filesystem like adbs

TYPE: AbstractFileSystem

file_path

The file path where to save the results to. This is relative to the base name or bucket defined in the storage

TYPE: str | Path

loadflows

The loadflow results to save.

TYPE: LoadflowResultsPolars

RETURNS DESCRIPTION
StoredLoadflowReference

A reference to the stored loadflow results.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def save_loadflow_results_polars(
    fs: AbstractFileSystem, file_path: str | Path, loadflows: LoadflowResultsPolars
) -> StoredLoadflowReference:
    """Save loadflow results to a file in hdf5 format.

    Parameters
    ----------
    fs : AbstractFileSystem
        The filesystem to use to save the results. This can be a local filesystem or an object store like S3 or Azure, using
        the fsspec library. For writing to local disk, you should use the DirFilesystem to inject a base path like this:
        ```python
        from fsspec.implementations.local import DirFileSystem
        fs = DirFileSystem(base_path="/path/to/base")
        ```
        Similarly, buckets can be used with the appropriate fsspec filesystem like adbs
    file_path: str | Path
        The file path where to save the results to. This is relative to the base name or bucket defined in the storage
    loadflows : LoadflowResultsPolars
        The loadflow results to save.

    Returns
    -------
    StoredLoadflowReference
        A reference to the stored loadflow results.
    """
    base_path = str(file_path)
    fs.makedirs(base_path, exist_ok=True)

    # The scalar fields go into a small json sidecar next to the parquet tables.
    with fs.open(base_path + "/metadata.json", "w") as f:
        json.dump(
            {
                "job_id": loadflows.job_id,
                "warnings": loadflows.warnings,
                "additional_information": loadflows.additional_information,
            },
            f,
        )

    # Stream each result table into its own parquet file.
    tables = {
        "branch_results": loadflows.branch_results,
        "node_results": loadflows.node_results,
        "regulating_element_results": loadflows.regulating_element_results,
        "converged": loadflows.converged,
        "va_diff_results": loadflows.va_diff_results,
    }
    for table_name, lazy_frame in tables.items():
        with fs.open(f"{base_path}/{table_name}.parquet", "wb") as f:
            lazy_frame.sink_parquet(f)

    return StoredLoadflowReference(
        relative_path=base_path,
    )

load_loadflow_results_polars #

load_loadflow_results_polars(fs, reference, validate=True)

Load loadflow results from a StoredLoadflowReference.

PARAMETER DESCRIPTION
fs

The filesystem to use to load the results. This can be a local filesystem or an object store like S3 or Azure, using the fsspec library.

TYPE: AbstractFileSystem

reference

The reference to the stored loadflow results.

TYPE: StoredLoadflowReference

validate

Whether to validate the loaded results against the schemas defined in the interfaces. For dataframes with a lot of data this can take a few seconds.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
LoadflowResultsPolars

The loaded loadflow results.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def load_loadflow_results_polars(
    fs: AbstractFileSystem, reference: StoredLoadflowReference, validate: bool = True
) -> LoadflowResultsPolars:
    """Load loadflow results from a StoredLoadflowReference.

    Parameters
    ----------
    fs: AbstractFileSystem
        The filesystem to use to load the results. This can be a local filesystem or an object store like S3 or Azure, using
        the fsspec library.
    reference: StoredLoadflowReference
        The reference to the stored loadflow results.
    validate: bool
        Whether to validate the loaded results against the schemas defined in the interfaces.
        For dataframes with a lot of data this can take a few seconds.

    Returns
    -------
    LoadflowResultsPolars
        The loaded loadflow results.
    """
    base_path = str(reference.relative_path)

    with fs.open(base_path + "/metadata.json", "r") as f:
        metadata = json.load(f)

    # NOTE(review): pl.scan_parquet is lazy and the file handles are closed when the
    # `with` blocks exit, before the frames are collected — confirm the scan buffers
    # its source eagerly, otherwise later collection may read from a closed handle.
    frames: dict = {}
    for table_name in (
        "branch_results",
        "node_results",
        "regulating_element_results",
        "converged",
        "va_diff_results",
    ):
        with fs.open(f"{base_path}/{table_name}.parquet", "rb") as f:
            frames[table_name] = pl.scan_parquet(f)

    if validate:
        return LoadflowResultsPolars(
            job_id=metadata["job_id"],
            branch_results=BranchResultSchemaPolars.validate(frames["branch_results"]),
            node_results=NodeResultSchemaPolars.validate(frames["node_results"]),
            regulating_element_results=RegulatingElementResultSchemaPolars.validate(
                frames["regulating_element_results"]
            ),
            converged=ConvergedSchemaPolars.validate(frames["converged"]),
            va_diff_results=VADiffResultSchemaPolars.validate(frames["va_diff_results"]),
            warnings=metadata["warnings"],
            additional_information=metadata["additional_information"],
        )

    # Skip schema validation entirely; model_construct bypasses pydantic validation too.
    return LoadflowResultsPolars.model_construct(
        job_id=metadata["job_id"],
        warnings=metadata["warnings"],
        additional_information=metadata["additional_information"],
        **frames,
    )

concatenate_loadflow_results_polars #

concatenate_loadflow_results_polars(loadflow_results_list)

Concatenate the results of the loadflow results.

PARAMETER DESCRIPTION
loadflow_results_list

The list of loadflow results to concatenate

TYPE: list

RETURNS DESCRIPTION
LoadflowResultsPolars

The concatenated loadflow results

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def concatenate_loadflow_results_polars(
    loadflow_results_list: list[LoadflowResultsPolars],
) -> LoadflowResultsPolars:
    """Concatenate the results of the loadflow results.

    Parameters
    ----------
    loadflow_results_list : list
        The list of loadflow results to concatenate

    Returns
    -------
    LoadflowResultsPolars
        The concatenated loadflow results

    Raises
    ------
    ValueError
        If the list is empty or the results belong to different job_ids.
    """
    # Explicit raises instead of `assert`: asserts are stripped under `python -O`,
    # which would silently disable this input validation.
    if not loadflow_results_list:
        raise ValueError("The list of loadflow results must not be empty")
    job_id = loadflow_results_list[0].job_id
    if any(res.job_id != job_id for res in loadflow_results_list):
        raise ValueError("All loadflow results must have the same job_id")

    def _concat_table(attribute: str):
        """Vertically concatenate one result table across all results, skipping None entries."""
        tables = [getattr(res, attribute) for res in loadflow_results_list]
        # make sure None values are not included in the concatenation
        return pl.concat([table for table in tables if table is not None], how="vertical")

    warnings = [warning for lf_results in loadflow_results_list for warning in lf_results.warnings]
    additional_information = [
        additional_information
        for lf_results in loadflow_results_list
        for additional_information in lf_results.additional_information
    ]
    return LoadflowResultsPolars(
        job_id=job_id,
        branch_results=_concat_table("branch_results"),
        node_results=_concat_table("node_results"),
        regulating_element_results=_concat_table("regulating_element_results"),
        converged=_concat_table("converged"),
        va_diff_results=_concat_table("va_diff_results"),
        warnings=warnings,
        additional_information=additional_information,
    )

select_timestep_polars #

select_timestep_polars(loadflow_results, timestep)

Select a single timestep from the loadflow results.

PARAMETER DESCRIPTION
loadflow_results

The loadflow results to select the timestep from.

TYPE: LoadflowResultsPolars

timestep

The timestep to select.

TYPE: int

RETURNS DESCRIPTION
LoadflowResultsPolars

The loadflow results for the selected timestep.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def select_timestep_polars(loadflow_results: LoadflowResultsPolars, timestep: int) -> LoadflowResultsPolars:
    """Select a single timestep from the loadflow results.

    Parameters
    ----------
    loadflow_results : LoadflowResultsPolars
        The loadflow results to select the timestep from.
    timestep : int
        The timestep to select.

    Returns
    -------
    LoadflowResultsPolars
        The loadflow results for the selected timestep.
    """
    # The same filter predicate applies to every result table.
    is_selected = pl.col("timestep") == timestep
    filtered_tables = {
        table_name: getattr(loadflow_results, table_name).filter(is_selected)
        for table_name in (
            "branch_results",
            "node_results",
            "regulating_element_results",
            "converged",
            "va_diff_results",
        )
    }
    return LoadflowResultsPolars(
        job_id=loadflow_results.job_id,
        warnings=loadflow_results.warnings,
        additional_information=loadflow_results.additional_information,
        **filtered_tables,
    )

extract_branch_results_polars #

extract_branch_results_polars(
    branch_results,
    timestep,
    contingencies,
    monitored_branches,
    basecase,
)

Extract the branch results for a specific timestep.

PARAMETER DESCRIPTION
branch_results

The branch results dataframe to extract the branch results from.

TYPE: BranchResultSchemaPolars

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

basecase

The basecase contingency id to use for the N-0 results.

TYPE: str

contingencies

The list of contingencies to extract the results for.

TYPE: list[str]

monitored_branches

The list of monitored branches to extract the results for. buses switches etc should not be included here, only branches.

TYPE: list[GridElement]

RETURNS DESCRIPTION
Float[ndarray, ' n_contingencies n_branches_monitored']

The branch results with the following: - shape (n_contingencies, n_branches_monitored) - only the p values of the monitored branches at the from-end - For three winding transformers, the p values are split into three rows for each side (hv, mv, lv).

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def extract_branch_results_polars(
    branch_results: BranchResultSchemaPolars,
    timestep: int,
    contingencies: list[str],
    monitored_branches: list[GridElement],
    basecase: str,
) -> tuple[Float[np.ndarray, " n_branches_monitored"], Float[np.ndarray, " n_contingencies n_branches_monitored"]]:
    """Extract the branch results for a specific timestep.

    Parameters
    ----------
    branch_results : BranchResultSchemaPolars
        The branch results dataframe to extract the branch results from.
    timestep : int
        The selected timestep to pull from the loadflow results.
    basecase : str
        The basecase contingency id to use for the N-0 results.
    contingencies : list[str]
        The list of contingencies to extract the results for.
    monitored_branches : list[GridElement]
        The list of monitored branches to extract the results for.
        buses switches etc should not be included here, only branches.

    Returns
    -------
    Float[np.ndarray, " n_branches_monitored"]
        The N-0 p values of the monitored branches at the from-end, in monitored_branches order.
    Float[np.ndarray, " n_contingencies n_branches_monitored"]
        The N-1 branch results with the following:
        - shape (n_contingencies, n_branches_monitored)
        - only the p values of the monitored branches at the from-end
        - For three winding transformers, the p values are split into three rows for each side (hv, mv, lv).
    """
    assert basecase not in contingencies, "Basecase contingency should not be in the list of N-k contingencies"
    n_monitored_branches = len(monitored_branches)
    n_contingencies = len(contingencies)
    if (n_monitored_branches == 0) or (n_contingencies == 0 and basecase is None):
        # If there are no monitored branches, return empty arrays.
        # BUGFIX: np.full requires an explicit fill_value (np.full(shape, dtype=float) raises a
        # TypeError); zeros match the fill_null(0.0) convention used for missing results below.
        return (
            np.zeros(n_monitored_branches, dtype=float),
            np.zeros((n_contingencies, n_monitored_branches), dtype=float),
        )
    # Map each three-winding-transformer element type to the side whose p value should be reported
    three_winding_side_dict = {
        "trafo3w_hv": BranchSide.ONE.value,
        "trafo3w_mv": BranchSide.TWO.value,
        "trafo3w_lv": BranchSide.THREE.value,
    }
    all_cases = [basecase, *contingencies]

    normal_branches_ids = [elem.id for elem in monitored_branches if elem.type not in three_winding_side_dict]

    # BUGFIX: restrict the scaffold to the requested timestep. Taking all unique timesteps would
    # create zero-filled rows for every other timestep and break the reshape below.
    timestep_df = branch_results.select(pl.col("timestep").unique()).filter(pl.col("timestep") == timestep)
    contingency_df = timestep_df.join(pl.LazyFrame({"contingency": all_cases}), how="cross")

    normal_branch_df = contingency_df.join(pl.LazyFrame({"element": normal_branches_ids}), how="cross").with_columns(
        side=BranchSide.ONE.value
    )

    trafo3w_dfs = []
    for trafo_type, side in three_winding_side_dict.items():
        three_winding_branches = [element.id for element in monitored_branches if element.type == trafo_type]
        trafo_side_df = contingency_df.join(pl.LazyFrame({"element": three_winding_branches}), how="cross").with_columns(
            side=side
        )
        trafo3w_dfs.append(trafo_side_df)
    all_branches_df = pl.concat([normal_branch_df, *trafo3w_dfs], how="vertical")

    merge_columns = ["timestep", "contingency", "element", "side"]
    # Left-join the actual results onto the scaffold; absent or NaN entries become 0.0
    all_p_results = (
        all_branches_df.join(
            branch_results.filter(pl.col("timestep") == timestep).select([*merge_columns, "p"]), on=merge_columns, how="left"
        )
        .fill_null(0.0)
        .fill_nan(0.0)
    )

    element_order = pl.col("element").cast(pl.Enum([elem.id for elem in monitored_branches]))
    # BUGFIX: sort the N-0 results into monitored_branches order. The scaffold concatenates all
    # normal branches before the trafo3w sides, so without sorting the N-0 vector could be
    # misordered whenever trafo3w elements are interleaved with normal branches.
    n_0_results = all_p_results.filter(pl.col("contingency") == basecase).sort(element_order).collect()
    n_0_vector = n_0_results["p"].to_numpy()

    sort_by = [
        pl.col("contingency").cast(pl.Enum(contingencies)),
        element_order,
    ]
    is_not_basecase = pl.col("contingency") != basecase
    n_1_results = all_p_results.filter(is_not_basecase).sort(sort_by).collect()
    n_1_array = n_1_results["p"].to_numpy().reshape(n_contingencies, n_monitored_branches)
    return n_0_vector, n_1_array

extract_node_matrices_polars #

extract_node_matrices_polars(
    node_results,
    timestep,
    contingencies,
    monitored_nodes,
    basecase="BASECASE",
)

Extract the node results for a specific timestep.

PARAMETER DESCRIPTION
node_results

The node results polars dataframe to extract the node results from.

TYPE: NodeResultSchemaPolars

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

basecase

The basecase contingency id to use for the N-0 results.

TYPE: str DEFAULT: 'BASECASE'

contingencies

The list of contingencies to extract the results for.

TYPE: list[str]

monitored_nodes

The list of monitored nodes to extract the results for. buses switches etc should not be included here, only nodes.

TYPE: list[GridElement]

RETURNS DESCRIPTION
vm_n0

The voltage magnitude results for the basecase contingency at the monitored nodes.

TYPE: Float[ndarray, ' n_nodes_monitored']

va_n0

The voltage angle results for the basecase contingency at the monitored nodes.

TYPE: Float[ndarray, ' n_nodes_monitored']

vm_n1

The voltage magnitude results for the contingencies at the monitored nodes.

TYPE: Float[ndarray, ' n_contingencies n_nodes_monitored']

va_n1

The voltage angle results for the contingencies at the monitored nodes.

TYPE: Float[ndarray, ' n_contingencies n_nodes_monitored']

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def extract_node_matrices_polars(
    node_results: NodeResultSchemaPolars,
    timestep: int,
    contingencies: list[str],
    monitored_nodes: list[GridElement],
    basecase: str = "BASECASE",
) -> tuple[
    Float[np.ndarray, " n_nodes_monitored"],
    Float[np.ndarray, "  n_nodes_monitored"],
    Float[np.ndarray, " n_contingencies n_nodes_monitored"],
    Float[np.ndarray, " n_contingencies n_nodes_monitored"],
]:
    """Extract the node results for a specific timestep.

    Parameters
    ----------
    node_results: NodeResultSchemaPolars,
        The node results polars dataframe to extract the node results from.
    timestep : int
        The selected timestep to pull from the loadflow results.
    basecase : str
        The basecase contingency id to use for the N-0 results.
    contingencies : list[str]
        The list of contingencies to extract the results for.
    monitored_nodes : list[GridElement]
        The list of monitored nodes to extract the results for.
        buses switches etc should not be included here, only nodes.

    Returns
    -------
    vm_n0 : Float[np.ndarray, " n_nodes_monitored"]
        The voltage magnitude results for the basecase contingency at the monitored nodes.
    va_n0 : Float[np.ndarray, " n_nodes_monitored"]
        The voltage angle results for the basecase contingency at the monitored nodes.
    vm_n1 : Float[np.ndarray, " n_contingencies n_nodes_monitored"]
        The voltage magnitude results for the contingencies at the monitored nodes.
    va_n1 : Float[np.ndarray, " n_contingencies n_nodes_monitored"]
        The voltage angle results for the contingencies at the monitored nodes.
    """
    assert basecase not in contingencies, "Basecase contingency should not be in the list of N-k contingencies"
    n_contingencies = len(contingencies)
    n_monitored_nodes = len(monitored_nodes)
    if (n_monitored_nodes == 0) or (n_contingencies == 0 and basecase is None):
        # If there are no monitored nodes, return empty arrays.
        # BUGFIX: np.full requires an explicit fill_value (np.full(shape, dtype=float) raises a
        # TypeError); NaN marks node values as "not computed", consistent with the unfilled
        # left-join nulls below.
        return (
            np.full(n_monitored_nodes, np.nan, dtype=float),
            np.full(n_monitored_nodes, np.nan, dtype=float),
            np.full((n_contingencies, n_monitored_nodes), np.nan, dtype=float),
            np.full((n_contingencies, n_monitored_nodes), np.nan, dtype=float),
        )

    # Build a (contingency x monitored node) scaffold and left-join the actual results onto it,
    # so every requested combination is present even when no result row exists.
    contingency_df = pl.LazyFrame({"contingency": [basecase, *contingencies]})
    all_cases_df = contingency_df.join(pl.LazyFrame({"element": [elem.id for elem in monitored_nodes]}), how="cross")
    node_results = all_cases_df.join(
        node_results.filter(pl.col("timestep") == timestep).select(["contingency", "element", "vm", "va"]),
        on=["contingency", "element"],
        how="left",
    )
    v_n0 = node_results.filter(pl.col("contingency") == basecase).select(["vm", "va"]).collect()
    vm_n0 = v_n0["vm"].to_numpy()
    va_n0 = v_n0["va"].to_numpy()

    v_n1 = node_results.filter(pl.col("contingency") != basecase).select(["vm", "va"]).collect()
    vm_n1 = v_n1["vm"].to_numpy()
    va_n1 = v_n1["va"].to_numpy()
    # reshape the results to have the contingencies as first dimension
    vm_n1_reshaped = vm_n1.reshape(len(contingencies), len(monitored_nodes))
    va_n1_reshaped = va_n1.reshape(len(contingencies), len(monitored_nodes))
    return vm_n0, va_n0, vm_n1_reshaped, va_n1_reshaped

extract_solver_matrices_polars #

extract_solver_matrices_polars(
    loadflow_results, nminus1_definition, timestep
)

Extract the N-0 and N-1 matrices in a similar format to the DC solver.

PARAMETER DESCRIPTION
loadflow_results

The loadflow results to extract the matrices from.

TYPE: LoadflowResults

nminus1_definition

The N-1 definition to use for the contingencies and monitored elements.

TYPE: Nminus1Definition

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

RETURNS DESCRIPTION
Float[ndarray, ' n_branches_monitored']

The N-0 matrix

Float[ndarray, ' n_solver_contingencies n_branches_monitored']

The N-1 matrix

Bool[ndarray, ' n_solver_contingencies']

The convergence status of the contingencies in the N-1 matrix True if converged or not calculated, False if not converged.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers_polars.py
def extract_solver_matrices_polars(
    loadflow_results: LoadflowResultsPolars,
    nminus1_definition: Nminus1Definition,
    timestep: int,
) -> tuple[
    Float[np.ndarray, " n_branches_monitored"],
    Float[np.ndarray, " n_solver_contingencies n_branches_monitored"],
    Bool[np.ndarray, " n_solver_contingencies"],
]:
    """Extract the N-0 and N-1 matrices in a similar format to the DC solver.

    Parameters
    ----------
    loadflow_results : LoadflowResults
        The loadflow results to extract the matrices from.
    nminus1_definition : Nminus1Definition
        The N-1 definition to use for the contingencies and monitored elements.
    timestep : int
        The selected timestep to pull from the loadflow results.

    Returns
    -------
    Float[np.ndarray, " n_branches_monitored"]
        The N-0 matrix
    Float[np.ndarray, " n_solver_contingencies n_branches_monitored"]
        The N-1 matrix
    Bool[np.ndarray, " n_solver_contingencies"]
        The convergence status of the contingencies in the N-1 matrix
        True if converged or not calculated, False if not converged.
    """
    base_contingency = next((c for c in nminus1_definition.contingencies if c.is_basecase()), None)
    assert base_contingency is not None, "No basecase contingency found in the N-1 definition."
    nk_order = [c.id for c in nminus1_definition.contingencies if not c.is_basecase()]

    # A contingency counts as successful when it converged, or when no calculation was
    # performed at all (e.g. for disconnected elements).
    success_expr = pl.col("status").is_in([ConvergenceStatus.CONVERGED.value, ConvergenceStatus.NO_CALCULATION.value])

    # Keep only the selected timestep and the non-basecase rows, ordered like nk_order.
    nk_converged = (
        loadflow_results.converged.filter(
            (pl.col("timestep") == timestep) & (pl.col("contingency") != base_contingency.id)
        )
        .sort(pl.col("contingency").cast(pl.Enum(nk_order)))
        .select(success_expr)
        .collect()
    )
    success = nk_converged["status"].to_numpy()

    # Only branch-kind monitored elements contribute to the solver matrices.
    monitored_branch_elements = [el for el in nminus1_definition.monitored_elements if el.kind == "branch"]
    n_0_vector, n1_matrix = extract_branch_results_polars(
        branch_results=loadflow_results.branch_results,
        timestep=timestep,
        contingencies=nk_order,
        monitored_branches=monitored_branch_elements,
        basecase=base_contingency.id,
    )

    return n_0_vector, n1_matrix, success

toop_engine_interfaces.loadflow_result_helpers #

Loadflow result helpers. Holds functions to work with the loadflow results interfaces.

save_loadflow_results #

save_loadflow_results(fs, file_path, loadflows)

Save loadflow results to a directory of parquet files plus a JSON metadata file.

PARAMETER DESCRIPTION
fs

The filesystem to use to save the results. This can be a local filesystem or an object store like S3 or Azure, using the fsspec library. For writing to local disk, you should use the DirFilesystem to inject a base path like this:

from fsspec.implementations.local import DirFileSystem
fs = DirFileSystem(base_path="/path/to/base")
Similarly, buckets can be used with the appropriate fsspec filesystem like adbs

TYPE: AbstractFileSystem

file_path

The file path where to save the results to. This is relative to the base name or bucket defined in the storage

TYPE: str | Path

loadflows

The loadflow results to save.

TYPE: LoadflowResults

RETURNS DESCRIPTION
StoredLoadflowReference

A reference to the stored loadflow results.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def save_loadflow_results(
    fs: AbstractFileSystem, file_path: str | Path, loadflows: LoadflowResults
) -> StoredLoadflowReference:
    """Save loadflow results as parquet files plus a JSON metadata file.

    Parameters
    ----------
    fs : AbstractFileSystem
        The filesystem to use to save the results. This can be a local filesystem or an object store like S3 or Azure, using
        the fsspec library. For writing to local disk, you should use the DirFilesystem to inject a base path like this:
        ```python
        from fsspec.implementations.local import DirFileSystem
        fs = DirFileSystem(base_path="/path/to/base")
        ```
        Similarly, buckets can be used with the appropriate fsspec filesystem like adbs
    file_path: str | Path
        The file path where to save the results to. This is relative to the base name or bucket defined in the storage
    loadflows : LoadflowResults
        The loadflow results to save.

    Returns
    -------
    StoredLoadflowReference
        A reference to the stored loadflow results.
    """
    base = str(file_path)
    fs.makedirs(base, exist_ok=True)

    # Scalar and list-valued fields go into a small JSON sidecar file.
    with fs.open(base + "/metadata.json", "w") as f:
        json.dump(
            {
                "job_id": loadflows.job_id,
                "warnings": loadflows.warnings,
                "additional_information": loadflows.additional_information,
            },
            f,
        )

    # Each result dataframe is written as its own parquet file next to the metadata.
    for name in ("branch_results", "node_results", "regulating_element_results", "converged", "va_diff_results"):
        with fs.open(f"{base}/{name}.parquet", "wb") as f:
            getattr(loadflows, name).to_parquet(f)

    return StoredLoadflowReference(
        relative_path=base,
    )

load_loadflow_results #

load_loadflow_results(fs, reference, validate=True)

Load loadflow results from a StoredLoadflowReference.

PARAMETER DESCRIPTION
fs

The filesystem to use to load the results. This can be a local filesystem or an object store like S3 or Azure, using the fsspec library.

TYPE: AbstractFileSystem

reference

The reference to the stored loadflow results.

TYPE: StoredLoadflowReference

validate

Whether to validate the loaded results against the schemas defined in the interfaces. For dataframes with a lot of data this can take a few seconds.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
LoadflowResults

The loaded loadflow results.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def load_loadflow_results(
    fs: AbstractFileSystem, reference: StoredLoadflowReference, validate: bool = True
) -> LoadflowResults:
    """Load loadflow results from a StoredLoadflowReference.

    Parameters
    ----------
    fs: AbstractFileSystem
        The filesystem to use to load the results. This can be a local filesystem or an object store like S3 or Azure, using
        the fsspec library.
    reference: StoredLoadflowReference
        The reference to the stored loadflow results.
    validate: bool
        Whether to validate the loaded results against the schemas defined in the interfaces.
        For dataframes with a lot of data this can take a few seconds.

    Returns
    -------
    LoadflowResults
        The loaded loadflow results.
    """
    base = str(reference.relative_path)

    # Scalar and list-valued fields live in the JSON sidecar file.
    with fs.open(base + "/metadata.json", "r") as f:
        metadata = json.load(f)

    # Each result dataframe is stored as its own parquet file next to the metadata.
    frames = {}
    for name in ("branch_results", "node_results", "regulating_element_results", "converged", "va_diff_results"):
        with fs.open(f"{base}/{name}.parquet", "rb") as f:
            frames[name] = pd.read_parquet(f)

    if validate:
        # Run each dataframe through its result schema; can be slow for large frames.
        return LoadflowResults(
            job_id=metadata["job_id"],
            branch_results=BranchResultSchema.validate(frames["branch_results"]),
            node_results=NodeResultSchema.validate(frames["node_results"]),
            regulating_element_results=RegulatingElementResultSchema.validate(frames["regulating_element_results"]),
            converged=ConvergedSchema.validate(frames["converged"]),
            va_diff_results=VADiffResultSchema.validate(frames["va_diff_results"]),
            warnings=metadata["warnings"],
            additional_information=metadata["additional_information"],
        )
    # model_construct bypasses model validation entirely.
    return LoadflowResults.model_construct(
        job_id=metadata["job_id"],
        warnings=metadata["warnings"],
        additional_information=metadata["additional_information"],
        **frames,
    )

concatenate_loadflow_results #

concatenate_loadflow_results(loadflow_results_list)

Concatenate the results of the loadflow results.

PARAMETER DESCRIPTION
loadflow_results_list

The list of loadflow results to concatenate

TYPE: list

RETURNS DESCRIPTION
LoadflowResults

The concatenated loadflow results

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def concatenate_loadflow_results(
    loadflow_results_list: list[LoadflowResults],
) -> LoadflowResults:
    """Concatenate the results of the loadflow results.

    Parameters
    ----------
    loadflow_results_list : list
        The list of loadflow results to concatenate

    Returns
    -------
    LoadflowResults
        The concatenated loadflow results
    """
    assert len(loadflow_results_list) > 0, "The list of loadflow results must not be empty"
    first = loadflow_results_list[0]
    assert all(first.job_id == res.job_id for res in loadflow_results_list), (
        "All loadflow results must have the same job_id"
    )

    def _stack(attribute: str) -> pd.DataFrame:
        # Row-wise concatenation of the same dataframe attribute across all results.
        return pd.concat([getattr(res, attribute) for res in loadflow_results_list], axis=0)

    # Flatten the per-result warning and info lists into single lists.
    combined_warnings = [warning for res in loadflow_results_list for warning in res.warnings]
    combined_information = [info for res in loadflow_results_list for info in res.additional_information]

    return LoadflowResults(
        job_id=first.job_id,
        branch_results=_stack("branch_results"),
        node_results=_stack("node_results"),
        regulating_element_results=_stack("regulating_element_results"),
        converged=_stack("converged"),
        va_diff_results=_stack("va_diff_results"),
        warnings=combined_warnings,
        additional_information=combined_information,
    )

get_failed_branch_results #

get_failed_branch_results(
    timestep,
    failed_outages,
    monitored_2_end_branches,
    monitored_3_end_branches,
)

Get the failed branch results.

PARAMETER DESCRIPTION
timestep

The timestep of the results

TYPE: int

failed_outages

The list of failed outages

TYPE: list

monitored_2_end_branches

The list of monitored 2 end branches. i.e. most branches

TYPE: list

monitored_3_end_branches

The list of monitored 3 end branches. i.e. 3 winding transformers

TYPE: list

RETURNS DESCRIPTION
DataFrame[BranchResultSchema]

The failed branch results

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
@pa.check_types
def get_failed_branch_results(
    timestep: int, failed_outages: list[str], monitored_2_end_branches: list[str], monitored_3_end_branches: list[str]
) -> pat.DataFrame[BranchResultSchema]:
    """Get the failed branch results.

    Builds an all-NaN branch result frame for contingencies whose loadflow
    did not converge, covering both 2-end branches (sides one/two) and
    three winding transformers (sides one/two/three).

    Parameters
    ----------
    timestep : int
        The timestep of the results
    failed_outages : list
        The list of failed outages
    monitored_2_end_branches : list
        The list of monitored 2 end branches. i.e. most branches
    monitored_3_end_branches : list
        The list of monitored 3 end branches. i.e. 3 winding transformers
    Returns
    -------
    pat.DataFrame[BranchResultSchema]
        The failed branch results
    """

    def _nan_results(elements: list[str], sides: list) -> pd.DataFrame:
        # One all-NaN row per (timestep, contingency, element, side) combination:
        # no loadflow solution exists for these contingencies.
        return pd.DataFrame(
            index=pd.MultiIndex.from_product(
                [[timestep], failed_outages, elements, sides],
                names=["timestep", "contingency", "element", "side"],
            )
        ).assign(p=np.nan, q=np.nan, i=np.nan, loading=np.nan)

    # The two branch kinds differ only in which sides exist, so build both
    # frames with the same helper and stack them.
    failed_branch_results = _nan_results(monitored_2_end_branches, [BranchSide.ONE.value, BranchSide.TWO.value])
    failed_trafo3w_results = _nan_results(
        monitored_3_end_branches, [BranchSide.ONE.value, BranchSide.TWO.value, BranchSide.THREE.value]
    )
    converted_branch_results = pd.concat([failed_branch_results, failed_trafo3w_results], axis=0)
    # add empty element_name and contingency_name columns to match the schema
    converted_branch_results["element_name"] = ""
    converted_branch_results["contingency_name"] = ""
    return converted_branch_results

get_failed_node_results #

get_failed_node_results(
    timestep, failed_outages, monitored_nodes
)

Get the failed node results.

PARAMETER DESCRIPTION
timestep

The timestep of the results

TYPE: int

failed_outages

The list of failed outages

TYPE: list

monitored_nodes

The list of monitored nodes

TYPE: list

RETURNS DESCRIPTION
DataFrame[NodeResultSchema]

The failed node results

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
@pa.check_types
def get_failed_node_results(
    timestep: int, failed_outages: list[str], monitored_nodes: list[str]
) -> pat.DataFrame[NodeResultSchema]:
    """Get the failed node results.

    Builds an all-NaN node result frame for contingencies whose loadflow did
    not converge, indexed by (timestep, contingency, element).

    Parameters
    ----------
    timestep : int
        The timestep of the results
    failed_outages : list
        The list of failed outages
    monitored_nodes : list
        The list of monitored nodes

    Returns
    -------
    pat.DataFrame[NodeResultSchema]
        The failed node results
    """
    # One all-NaN row per (timestep, contingency, node); the result columns are NaN
    # because no loadflow solution exists, and the empty name columns are placeholders
    # required by the schema. (The previous re-assignment of p/q/element_name/
    # contingency_name after .assign was redundant dead code and has been removed.)
    return pd.DataFrame(
        index=pd.MultiIndex.from_product(
            [[timestep], failed_outages, monitored_nodes],
            names=["timestep", "contingency", "element"],
        )
    ).assign(vm=np.nan, va=np.nan, vm_loading=np.nan, p=np.nan, q=np.nan, element_name="", contingency_name="")

extract_branch_results #

extract_branch_results(
    branch_results,
    timestep,
    contingencies,
    monitored_branches,
    basecase,
)

Extract the branch results for a specific timestep.

PARAMETER DESCRIPTION
branch_results

The branch results dataframe to extract the branch results from.

TYPE: BranchResultSchema

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

basecase

The basecase contingency id to use for the N-0 results.

TYPE: str

contingencies

The list of contingencies to extract the results for.

TYPE: list[str]

monitored_branches

The list of monitored branches to extract the results for. buses switches etc should not be included here, only branches.

TYPE: list[GridElement]

RETURNS DESCRIPTION
tuple[Float[ndarray, ' n_branches_monitored'], Float[ndarray, ' n_contingencies n_branches_monitored']]

A pair of (N-0, N-1) branch results: the N-0 vector holds the basecase p values of the monitored branches at the from-end, and the N-1 matrix has shape (n_contingencies, n_branches_monitored) with the p values of the monitored branches at the from-end. For three winding transformers, the p values are split into three rows for each side (hv, mv, lv).

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def extract_branch_results(
    branch_results: BranchResultSchema,
    timestep: int,
    contingencies: list[str],
    monitored_branches: list[GridElement],
    basecase: str,
) -> tuple[Float[np.ndarray, " n_branches_monitored"], Float[np.ndarray, " n_contingencies n_branches_monitored"]]:
    """Extract the branch results for a specific timestep.

    Parameters
    ----------
    branch_results: BranchResultSchema,
        The branch results dataframe to extract the branch results from.
    timestep : int
        The selected timestep to pull from the loadflow results.
    basecase : str
        The basecase contingency id to use for the N-0 results.
    contingencies : list[str]
        The list of contingencies to extract the results for.
    monitored_branches : list[GridElement]
        The list of monitored branches to extract the results for.
        buses switches etc should not be included here, only branches.

    Returns
    -------
    Float[np.ndarray, " n_branches_monitored"]
        The N-0 p values of the monitored branches at the from-end, in monitored_branches order.
    Float[np.ndarray, " n_contingencies n_branches_monitored"]
        The N-1 branch results with the following:
        - shape (n_contingencies, n_branches_monitored)
        - only the p values of the monitored branches at the from-end
        - For three winding transformers, the p values are split into three rows for each side (hv, mv, lv).
    """
    assert basecase not in contingencies, "Basecase contingency should not be in the list of N-k contingencies"
    n_monitored_branches = len(monitored_branches)
    n_contingencies = len(contingencies)
    if (n_monitored_branches == 0) or (n_contingencies == 0 and basecase is None):
        # If there are no monitored branches, return empty arrays.
        # BUGFIX: np.full requires an explicit fill_value (np.full(shape, dtype=float) raises a
        # TypeError); zeros match the fill_value=0.0 convention used for missing results below.
        return (
            np.zeros(n_monitored_branches, dtype=float),
            np.zeros((n_contingencies, n_monitored_branches), dtype=float),
        )
    # Map each three-winding-transformer element type to the side whose p value should be reported
    three_winding_side_dict = {
        "trafo3w_hv": [BranchSide.ONE.value],
        "trafo3w_mv": [BranchSide.TWO.value],
        "trafo3w_lv": [BranchSide.THREE.value],
    }
    all_cases = [basecase, *contingencies]
    normal_branches_ids = [elem.id for elem in monitored_branches if elem.type not in three_winding_side_dict]
    normal_branch_multi_idx = [*product([timestep], all_cases, normal_branches_ids, [BranchSide.ONE.value])]
    trafo3w_idx = []
    for trafo_type, sides in three_winding_side_dict.items():
        three_winding_branches = [element.id for element in monitored_branches if element.type == trafo_type]
        multi_idx = product([timestep], all_cases, three_winding_branches, sides)
        trafo3w_idx.extend(multi_idx)

    multi_index = pd.MultiIndex.from_tuples(
        [*normal_branch_multi_idx, *trafo3w_idx],
        names=["timestep", "contingency", "element", "side"],
    )

    # Drop timestep and side, since we do not need them anymore
    p_results = branch_results.reindex(multi_index, fill_value=0.0).droplevel(["side", "timestep"])["p"]

    # bring into correct order
    monitored_branches_order = [elem.id for elem in monitored_branches]

    n_0_vector = p_results.fillna(0.0).loc[basecase, monitored_branches_order].values

    n_1_index = pd.MultiIndex.from_product([contingencies, monitored_branches_order], names=["contingency", "element"])
    n_1_results = p_results.reindex(n_1_index, fill_value=0.0)
    n_1_array = n_1_results.fillna(0.0).values.reshape(len(contingencies), len(monitored_branches_order))
    return n_0_vector, n_1_array

extract_node_matrices #

extract_node_matrices(
    node_results,
    timestep,
    contingencies,
    monitored_nodes,
    basecase="BASECASE",
)

Extract the node results for a specific timestep.

PARAMETER DESCRIPTION
node_results

The node results dataframe to extract the node results from.

TYPE: NodeResultSchema

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

basecase

The basecase contingency id to use for the N-0 results.

TYPE: str DEFAULT: 'BASECASE'

contingencies

The list of contingencies to extract the results for.

TYPE: list[str]

monitored_nodes

The list of monitored nodes to extract the results for. buses switches etc should not be included here, only nodes.

TYPE: list[GridElement]

RETURNS DESCRIPTION
vm_n0

The voltage magnitude results for the basecase contingency at the monitored nodes.

TYPE: Float[ndarray, ' n_nodes_monitored']

va_n0

The voltage angle results for the basecase contingency at the monitored nodes.

TYPE: Float[ndarray, ' n_nodes_monitored']

vm_n1

The voltage magnitude results for the contingencies at the monitored nodes.

TYPE: Float[ndarray, ' n_contingencies n_nodes_monitored']

va_n1

The voltage angle results for the contingencies at the monitored nodes.

TYPE: Float[ndarray, ' n_contingencies n_nodes_monitored']

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def extract_node_matrices(
    node_results: NodeResultSchema,
    timestep: int,
    contingencies: list[str],
    monitored_nodes: list[GridElement],
    basecase: str = "BASECASE",
) -> tuple[
    Float[np.ndarray, " n_nodes_monitored"],
    Float[np.ndarray, " n_nodes_monitored"],
    Float[np.ndarray, " n_contingencies n_nodes_monitored"],
    Float[np.ndarray, " n_contingencies n_nodes_monitored"],
]:
    """Extract the node results for a specific timestep.

    Parameters
    ----------
    node_results: NodeResultSchema,
        The node results dataframe to extract the node results from.
    timestep : int
        The selected timestep to pull from the loadflow results.
    contingencies : list[str]
        The list of contingencies to extract the results for.
    monitored_nodes : list[GridElement]
        The list of monitored nodes to extract the results for.
        buses switches etc should not be included here, only nodes.
    basecase : str
        The basecase contingency id to use for the N-0 results.

    Returns
    -------
    vm_n0 : Float[np.ndarray, " n_nodes_monitored"]
        The voltage magnitude results for the basecase contingency at the monitored nodes.
    va_n0 : Float[np.ndarray, " n_nodes_monitored"]
        The voltage angle results for the basecase contingency at the monitored nodes.
    vm_n1 : Float[np.ndarray, " n_contingencies n_nodes_monitored"]
        The voltage magnitude results for the contingencies at the monitored nodes.
    va_n1 : Float[np.ndarray, " n_contingencies n_nodes_monitored"]
        The voltage angle results for the contingencies at the monitored nodes.
    """
    assert basecase not in contingencies, "Basecase contingency should not be in the list of N-k contingencies"
    n_contingencies = len(contingencies)
    n_monitored_nodes = len(monitored_nodes)
    if (n_monitored_nodes == 0) or (n_contingencies == 0 and basecase is None):
        # Nothing to extract: return correctly shaped arrays without touching the dataframe.
        # BUGFIX: np.full requires an explicit fill_value (the original call omitted it and
        # raised a TypeError on this path). NaN marks "no result available", consistent with
        # the NaN fill used in the reindex below.
        return (
            np.full(n_monitored_nodes, np.nan, dtype=float),
            np.full(n_monitored_nodes, np.nan, dtype=float),
            np.full((n_contingencies, n_monitored_nodes), np.nan, dtype=float),
            np.full((n_contingencies, n_monitored_nodes), np.nan, dtype=float),
        )

    # Get the node results for the given job_id and timestep
    node_results = node_results.xs(timestep, level="timestep")
    # Reindex to ensure all contingencies and monitored nodes are present.
    # Missing combinations become NaN so the caller can detect absent results
    # (note: the branch extractor in this module fills with 0 instead).
    product_index = pd.MultiIndex.from_product(
        [[basecase, *contingencies], [elem.id for elem in monitored_nodes]],
        names=["contingency", "element"],
    )
    node_results = node_results.reindex(product_index, fill_value=np.nan)
    vm_n0 = node_results.loc[basecase, "vm"].values
    va_n0 = node_results.loc[basecase, "va"].values

    vm_n1 = node_results.loc[contingencies, :]["vm"].values
    va_n1 = node_results.loc[contingencies, :]["va"].values
    # reshape the results to have the contingencies as first dimension
    vm_n1 = vm_n1.reshape(len(contingencies), len(monitored_nodes))
    va_n1 = va_n1.reshape(len(contingencies), len(monitored_nodes))
    return vm_n0, va_n0, vm_n1, va_n1

extract_solver_matrices #

extract_solver_matrices(
    loadflow_results, nminus1_definition, timestep
)

Extract the N-0 and N-1 matrices in a similar format to the DC solver.

PARAMETER DESCRIPTION
loadflow_results

The loadflow results to extract the matrices from.

TYPE: LoadflowResults

nminus1_definition

The N-1 definition to use for the contingencies and monitored elements.

TYPE: Nminus1Definition

timestep

The selected timestep to pull from the loadflow results.

TYPE: int

RETURNS DESCRIPTION
Float[ndarray, ' n_branches_monitored']

The N-0 matrix

Float[ndarray, ' n_solver_contingencies n_branches_monitored']

The N-1 matrix

Bool[ndarray, ' n_solver_contingencies']

The convergence status of the contingencies in the N-1 matrix True if converged or not calculated, False if not converged.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def extract_solver_matrices(
    loadflow_results: LoadflowResults,
    nminus1_definition: Nminus1Definition,
    timestep: int,
) -> tuple[
    Float[np.ndarray, " n_branches_monitored"],
    Float[np.ndarray, " n_solver_contingencies n_branches_monitored"],
    Bool[np.ndarray, " n_solver_contingencies"],
]:
    """Extract the N-0 and N-1 matrices in a similar format to the DC solver.

    Parameters
    ----------
    loadflow_results : LoadflowResults
        The loadflow results to extract the matrices from.
    nminus1_definition : Nminus1Definition
        The N-1 definition supplying the contingencies and monitored elements.
    timestep : int
        The selected timestep to pull from the loadflow results.

    Returns
    -------
    Float[np.ndarray, " n_branches_monitored"]
        The N-0 matrix
    Float[np.ndarray, " n_solver_contingencies n_branches_monitored"]
        The N-1 matrix
    Bool[np.ndarray, " n_solver_contingencies"]
        Per-contingency convergence flags for the N-1 matrix:
        True if converged or not calculated, False if not converged.
    """
    basecase = next((cont for cont in nminus1_definition.contingencies if cont.is_basecase()), None)
    assert basecase is not None, "No basecase contingency found in the N-1 definition."

    # Row order of the N-1 matrix: all contingencies except the base case, in definition order.
    contingency_order = [cont.id for cont in nminus1_definition.contingencies if not cont.is_basecase()]

    # A contingency counts as successful when it converged or was skipped entirely;
    # contingencies missing from the status table are treated as failed.
    acceptable_statuses = [ConvergenceStatus.CONVERGED.value, ConvergenceStatus.NO_CALCULATION.value]
    status_at_timestep = loadflow_results.converged["status"].loc[timestep]
    success = status_at_timestep.isin(acceptable_statuses).reindex(contingency_order, fill_value=False).values

    # Only branch-kind monitored elements contribute to the solver matrices.
    monitored_branches = [elem for elem in nminus1_definition.monitored_elements if elem.kind == "branch"]
    n_0_vector, n1_matrix = extract_branch_results(
        branch_results=loadflow_results.branch_results,
        timestep=timestep,
        contingencies=contingency_order,
        monitored_branches=monitored_branches,
        basecase=basecase.id,
    )

    return n_0_vector, n1_matrix, success

select_timestep #

select_timestep(loadflow_results, timestep)

Select a specific timestep from the loadflow results.

PARAMETER DESCRIPTION
loadflow_results

The loadflow results to select the timestep from.

TYPE: LoadflowResults

timestep

The timestep to select.

TYPE: int

RETURNS DESCRIPTION
LoadflowResults

The loadflow results for the selected timestep.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def select_timestep(loadflow_results: LoadflowResults, timestep: int) -> LoadflowResults:
    """Select a specific timestep from the loadflow results.

    Parameters
    ----------
    loadflow_results : LoadflowResults
        The loadflow results to select the timestep from.
    timestep : int
        The timestep to select.

    Returns
    -------
    LoadflowResults
        The loadflow results for the selected timestep.
    """

    def _slice_timestep(frame: pd.DataFrame) -> pd.DataFrame:
        """Return the rows for ``timestep``; an empty frame if the timestep is absent."""
        try:
            return frame.xs(timestep, level="timestep", drop_level=False)
        except KeyError:
            # Timestep not present in this frame's index -> empty frame with same columns.
            return frame.iloc[0:0]

    # Slice every timestep-indexed dataframe; metadata fields are carried over unchanged.
    sliced_frames = {
        field: _slice_timestep(getattr(loadflow_results, field))
        for field in (
            "branch_results",
            "node_results",
            "regulating_element_results",
            "converged",
            "va_diff_results",
        )
    }
    return LoadflowResults(
        job_id=loadflow_results.job_id,
        warnings=loadflow_results.warnings,
        additional_information=loadflow_results.additional_information,
        **sliced_frames,
    )

convert_polars_loadflow_results_to_pandas #

convert_polars_loadflow_results_to_pandas(
    loadflow_results_polars,
)

Convert the LoadflowResultsPolars class to LoadflowResults class.

PARAMETER DESCRIPTION
loadflow_results_polars

The loadflow results in polars format.

TYPE: LoadflowResultsPolars

RETURNS DESCRIPTION
LoadflowResults

The loadflow results in pandas format.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def convert_polars_loadflow_results_to_pandas(
    loadflow_results_polars: LoadflowResultsPolars,
) -> LoadflowResults:
    """Convert the LoadflowResultsPolars class to LoadflowResults class.

    Parameters
    ----------
    loadflow_results_polars : LoadflowResultsPolars
        The loadflow results in polars format.

    Returns
    -------
    LoadflowResults
        The loadflow results in pandas format.
    """
    # Columns that, when present, form the pandas multi-index (in this order).
    index_candidates = ("timestep", "contingency", "element", "side")

    def _to_pandas(frame: Optional[Union[pl.DataFrame, pl.LazyFrame]]) -> Optional[pd.DataFrame]:
        """Materialize a polars (Lazy)Frame as an indexed pandas DataFrame; None passes through.

        Parameters
        ----------
        frame : Optional[Union[pl.DataFrame, pl.LazyFrame]]
            The polars DataFrame or LazyFrame to convert.

        Returns
        -------
        Optional[pd.DataFrame]
            The pandas DataFrame or None if the input was None.
        """
        if frame is None:
            return None
        # LazyFrames must be collected before conversion; eager frames have no .collect.
        materialized = frame.collect() if hasattr(frame, "collect") else frame
        pdf = materialized.to_pandas()
        present = [col for col in index_candidates if col in pdf.columns]
        return pdf.set_index(present) if present else pdf

    return LoadflowResults(
        job_id=loadflow_results_polars.job_id,
        branch_results=_to_pandas(loadflow_results_polars.branch_results),
        node_results=_to_pandas(loadflow_results_polars.node_results),
        regulating_element_results=_to_pandas(loadflow_results_polars.regulating_element_results),
        converged=_to_pandas(loadflow_results_polars.converged),
        va_diff_results=_to_pandas(loadflow_results_polars.va_diff_results),
        warnings=loadflow_results_polars.warnings,
        additional_information=loadflow_results_polars.additional_information,
    )

convert_pandas_loadflow_results_to_polars #

convert_pandas_loadflow_results_to_polars(loadflow_results)

Convert the LoadflowResults class to LoadflowResultsPolars class.

PARAMETER DESCRIPTION
loadflow_results

The loadflow results in pandas format.

TYPE: LoadflowResults

RETURNS DESCRIPTION
LoadflowResultsPolars

The loadflow results in polars format.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/loadflow_result_helpers.py
def convert_pandas_loadflow_results_to_polars(loadflow_results: LoadflowResults) -> LoadflowResultsPolars:
    """Convert the LoadflowResults class to LoadflowResultsPolars class.

    Parameters
    ----------
    loadflow_results : LoadflowResults
        The loadflow results in pandas format.

    Returns
    -------
    LoadflowResultsPolars
        The loadflow results in polars format.
    """

    def _to_polars(frame: Optional[pd.DataFrame], lazy: bool) -> Optional[pl.DataFrame]:
        """Convert a pandas DataFrame to polars, optionally lazily; None passes through.

        Parameters
        ----------
        frame : Optional[pd.DataFrame]
            The pandas DataFrame to convert.
        lazy : bool
            Whether to return a LazyFrame or a DataFrame.

        Returns
        -------
        Optional[pl.DataFrame]
            The polars DataFrame or None if the input was None.
        """
        if frame is None:
            return None
        if isinstance(frame, pd.DataFrame):
            # Keep the (multi-)index as columns and preserve NaNs rather than mapping them to null.
            frame = pl.from_pandas(frame, include_index=True, nan_to_null=False)
        # Inputs that are already polars frames are passed through as-is.
        return frame.lazy() if lazy else frame

    return LoadflowResultsPolars(
        job_id=loadflow_results.job_id,
        branch_results=_to_polars(loadflow_results.branch_results, lazy=True),
        node_results=_to_polars(loadflow_results.node_results, lazy=True),
        regulating_element_results=_to_polars(loadflow_results.regulating_element_results, lazy=True),
        converged=_to_polars(loadflow_results.converged, lazy=True),
        va_diff_results=_to_polars(loadflow_results.va_diff_results, lazy=True),
        warnings=loadflow_results.warnings,
        additional_information=loadflow_results.additional_information,
        lazy=True,
    )

toop_engine_interfaces.nminus1_definition #

The N-1 definition holds monitored and outaged elements for a grid.

This information is not present in the grid models and hence needs to be stored separately to run an N-1 computation. The order of the outages should be the same as in the jax code, where it's hardcoded to the following: - branch outages - multi outage - non-relevant injection outages - relevant injection outages

POWSYBL_SUPPORTED_ID_TYPES module-attribute #

POWSYBL_SUPPORTED_ID_TYPES = Literal[
    "powsybl", "cgmes", "ucte"
]

PANDAPOWER_SUPPORTED_ID_TYPES module-attribute #

PANDAPOWER_SUPPORTED_ID_TYPES = Literal[
    "unique_pandapower", "cgmes"
]

ELEMENT_ID_TYPES module-attribute #

GridElement #

Bases: BaseModel

A grid element is identified by its id (powsybl) or by its id and type (pandapower)

id instance-attribute #

id

The id of the element. For powsybl grids this is the global string id, for pandapower this is the integer index into the dataframe

name class-attribute instance-attribute #

name = ''

The name of the element. This is optional, but can be used to provide a more human-readable name for the element.

type instance-attribute #

type

For pandapower, we need to further specify a type which corresponds to the table pandapower stores the information in. Valid tables are 'line', 'trafo', 'ext_grid', 'gen', 'load', 'shunt', ... For powsybl, this is not strictly needed to identify the element however it makes it easier. In that case, type will be something like TIE_LINE, LINE, TWO_WINDING_TRANSFORMER, GENERATOR, etc.

kind instance-attribute #

kind

The kind of the element. Usually these are handled differently in the grid modelling software, so it can make assembling an N-1 analysis easier if it is known whether the element is a branch, bus or injection. This could be inferred from the type; however, for convenience it is stored separately.

For the bus type there is some potential confusion in powsybl. In pandapower, this always refers to the net.bus df. In powsybl in a bus/branch model, there are no busbar sections in powsybl, i.e. net.get_node_breaker_topology does not deliver busbar sections. Meaning, the "bus" type refers to the net.get_bus_breaker_topology buses if it's a bus/breaker topology bus. If it's a node/breaker topology, then "bus" refers to the busbar section.

Contingency #

Bases: BaseModel

A single N-1 case

elements instance-attribute #

elements

The grid elements that are to be outaged under this contingency. Usually, this will be exactly one element however exceptional contingencies and multi-outages might include more than one element.

id instance-attribute #

id

The id of the contingency. This is used to identify the contingency in the results. It should be unique across all contingencies in the N-1 definition.

name class-attribute instance-attribute #

name = ''

The name of the contingency. This is optional, but can be used to provide a more human-readable name. This will show up in the Loadflowresult-tables as column contingency_name.

is_multi_outage #

is_multi_outage()

Check if the contingency is a multi-outage.

A multi-outage is defined as a contingency that has more than one element in it.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def is_multi_outage(self) -> bool:
    """Return True when this contingency outages more than one element.

    Contingencies with at least two elements are treated as multi-outages.
    """
    return len(self.elements) >= 2

is_basecase #

is_basecase()

Check if the contingency is the N-0 base case.

A base case is defined as a contingency that has no elements in it.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def is_basecase(self) -> bool:
    """Return True when this contingency is the N-0 base case.

    The base case is the contingency without any outaged elements.
    """
    return not self.elements

is_single_outage #

is_single_outage()

Check if the contingency is a normal single-element outage.

A single outage is defined as a contingency that has exactly one element in it.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def is_single_outage(self) -> bool:
    """Return True when this contingency outages exactly one element.

    Neither the base case (no elements) nor a multi-outage (two or more).
    """
    element_count = len(self.elements)
    return element_count == 1

LoadflowParameters #

Bases: BaseModel

Loadflow parameters for the N-1 computation.

distributed_slack class-attribute instance-attribute #

distributed_slack = False

Whether to distribute the slack across all injections in the grid. Only relevant for powsybl grids.

contingency_propagation class-attribute instance-attribute #

contingency_propagation = False

Whether to enable powsybl's contingency propagation in the N-1 analysis.

Powsybl: https://powsybl.readthedocs.io/projects/powsybl-open-loadflow/en/latest/security/parameters.html Security Analysis will determine by topological search the switches with type circuit breakers (i.e. capable of opening fault currents) that must be opened to isolate the fault. Depending on the network structure, this could lead to more equipments to be simulated as tripped, because disconnectors and load break switches (i.e., not capable of opening fault currents) are not considered.

Pandapower: Currently not supported in pandapower.

Nminus1Definition #

Bases: BaseModel

An N-1 definition holds monitored and outaged elements for a grid.

For powsybl, ids are unique across types (i.e. a branch and an injection can not have the same id), however in pandapower, ids are not unique and we have to store the type alongside with them.

monitored_elements instance-attribute #

monitored_elements

A list of monitored elements that should be observed during the N-1 computation.

contingencies instance-attribute #

contingencies

A list of contingencies that should be computed during the N-1 computation.

loadflow_parameters class-attribute instance-attribute #

loadflow_parameters = Field(
    default_factory=LoadflowParameters
)

Loadflow parameters for the N-1 computation.

id_type class-attribute instance-attribute #

id_type = None

The type of the ids used in the N-1 definition. This is used to determine how to interpret the ids in the monitored elements and contingencies. See ELEMENT_ID_TYPES for more information. If none, pandapower will try to use the globally unique ids, and powsybl will use the global string ids.

base_case property #

base_case

Get the base case contingency, which is the contingency with no elements in it.

__getitem__ #

__getitem__(key)

Get a subset of the nminus1definition based on the contingencies.

If a string is given, the contingency id must be in the contingencies list. If an integer or slice is given, the case id will be indexed by the integer or slice.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def __getitem__(self, key: str | int | slice) -> "Nminus1Definition":
    """Return a copy of this definition restricted to a subset of the contingencies.

    A string key selects the contingency with that id (KeyError if absent).
    An int or slice key indexes the contingency list positionally.
    Monitored elements and loadflow parameters are carried over unchanged.
    """
    if isinstance(key, slice):
        selection = key
    elif isinstance(key, int):
        selection = slice(key, key + 1)
    elif isinstance(key, str):
        contingency_ids = [contingency.id for contingency in self.contingencies]
        if key not in contingency_ids:
            raise KeyError(f"Contingency id {key} not in contingencies.")
        position = contingency_ids.index(key)
        selection = slice(position, position + 1)
    else:
        raise TypeError("Key must be a string, int or slice.")

    # pylint: disable=unsubscriptable-object
    return Nminus1Definition(
        monitored_elements=self.monitored_elements,
        contingencies=self.contingencies[selection],
        loadflow_parameters=self.loadflow_parameters,
    )

load_nminus1_definition_fs #

load_nminus1_definition_fs(filesystem, file_path)

Load an N-1 definition from a file system.

PARAMETER DESCRIPTION
filesystem

The file system to use to load the N-1 definition.

TYPE: AbstractFileSystem

file_path

The path to the file containing the N-1 definition in json format.

TYPE: Union[str, Path]

RETURNS DESCRIPTION
Nminus1Definition

The loaded N-1 definition.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def load_nminus1_definition_fs(
    filesystem: AbstractFileSystem,
    file_path: Union[str, Path],
) -> Nminus1Definition:
    """Read an N-1 definition (json) from the given file system.

    Parameters
    ----------
    filesystem : AbstractFileSystem
        The file system to use to load the N-1 definition.
    file_path : Union[str, Path]
        The path to the file containing the N-1 definition in json format.

    Returns
    -------
    Nminus1Definition
        The loaded N-1 definition.
    """
    # Parsing and validation are delegated to the generic pydantic loader.
    return load_pydantic_model_fs(filesystem=filesystem, file_path=file_path, model_class=Nminus1Definition)

load_nminus1_definition #

load_nminus1_definition(filename)

Load an N-1 definition from a json file

PARAMETER DESCRIPTION
filename

The path to the json file containing the N-1 definition.

TYPE: Path

RETURNS DESCRIPTION
Nminus1Definition

The loaded N-1 definition.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def load_nminus1_definition(filename: Path) -> Nminus1Definition:
    """Load an N-1 definition from a json file on the local file system.

    Parameters
    ----------
    filename : Path
        The path to the json file containing the N-1 definition.

    Returns
    -------
    Nminus1Definition
        The loaded N-1 definition.
    """
    # Convenience wrapper around the filesystem-based loader using the local disk.
    local_fs = LocalFileSystem()
    return load_nminus1_definition_fs(filesystem=local_fs, file_path=filename)

save_nminus1_definition #

save_nminus1_definition(filename, nminus1_definition)

Save an N-1 definition to a json file

PARAMETER DESCRIPTION
filename

The path to the json file to save the N-1 definition to.

TYPE: Path

nminus1_definition

The N-1 definition to save.

TYPE: Nminus1Definition

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/nminus1_definition.py
def save_nminus1_definition(filename: Path, nminus1_definition: Nminus1Definition) -> None:
    """Write an N-1 definition to a json file on the local file system.

    Parameters
    ----------
    filename : Path
        The path to the json file to save the N-1 definition to.
    nminus1_definition : Nminus1Definition
        The N-1 definition to save.
    """
    # Serialization is delegated to the generic pydantic writer on the local disk.
    save_pydantic_model_fs(
        filesystem=LocalFileSystem(),
        file_path=filename,
        pydantic_model=nminus1_definition,
    )

Asset Topology#

toop_engine_interfaces.asset_topology #

Contains the data models for the asset topology.

BranchEnd module-attribute #

BranchEnd = Literal['from', 'to', 'hv', 'mv', 'lv']

AssetBranchTypePandapower module-attribute #

AssetBranchTypePandapower = Literal[
    "line",
    "trafo",
    "trafo3w_lv",
    "trafo3w_mv",
    "trafo3w_hv",
    "impedance",
]

AssetBranchTypePowsybl module-attribute #

AssetBranchTypePowsybl = Literal[
    "LINE", "TWO_WINDINGS_TRANSFORMER", "TIE_LINE"
]

AssetBranchType module-attribute #

AssetBranchType = Literal[
    AssetBranchTypePandapower, AssetBranchTypePowsybl
]

AssetInjectionTypePandapower module-attribute #

AssetInjectionTypePandapower = Literal[
    "ext_grid",
    "gen",
    "load",
    "shunt",
    "sgen",
    "ward",
    "ward_load",
    "ward_shunt",
    "xward",
    "xward_load",
    "xward_shunt",
    "dcline_from",
    "dcline_to",
]

AssetInjectionTypePowsybl module-attribute #

AssetInjectionTypePowsybl = Literal[
    "LOAD",
    "GENERATOR",
    "DANGLING_LINE",
    "HVDC_CONVERTER_STATION",
    "STATIC_VAR_COMPENSATOR",
    "SHUNT_COMPENSATOR",
    "BATTERY",
]

AssetInjectionType module-attribute #

AssetType module-attribute #

AssetType = Literal[AssetBranchType, AssetInjectionType]

PowsyblSwitchValues #

Bases: Enum

Enum for the switch values in the Powsybl model.

OPEN class-attribute instance-attribute #

OPEN = True

The switch is open, i.e. not connected.

CLOSED class-attribute instance-attribute #

CLOSED = False

The switch is closed, i.e. connected.

Busbar #

Bases: BaseModel

Busbar data describing a single busbar at a station.

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the busbar. Corresponds to the busbar's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the busbar, might be useful for finding the busbar later on

name class-attribute instance-attribute #

name = None

The name of the busbar, might be useful for finding the busbar later on

int_id instance-attribute #

int_id

Is used to reference busbars in the couplers. Needs to be unique per station

in_service class-attribute instance-attribute #

in_service = True

Whether the busbar is in service. If False, it will be ignored in the switching table

bus_branch_bus_id class-attribute instance-attribute #

bus_branch_bus_id = None

The bus_branch_bus_id refers to the bus-branch model bus id. There might be a difference between the busbar grid_model_id (a physical busbar) and the bus_branch_bus_id from the bus-branch model. Use this bus_branch_bus_id to store the bus-branch model bus id. Note: the Station grid_model_id is also a bus-branch bus_branch_bus_id. This id is the most splittable bus_branch_bus_id. Other bus_branch_bus_ids are part of the physical station, but are separated by a coupler or branch.

AssetBay #

Bases: BaseModel

Saves the physical connection from the asset to the substation busbars - a bay (Schaltfeld).

A line usually has three switches, before it is connected to the busbar. Two disconnector switches and one circuit breaker switch. A transformer usually has two switches, before it is connected to the busbar. One disconnector switch and one circuit breaker switch.

type: n - node type: b - busbar (Sammelschiene) type: CB - DV Circuit Breaker / Power Switch (Leistungsschalter) type: DS - Disconnector Switch (Trennschalter)

------------------ busbar 1 - type: b | / type: DS - SR Switch busbar 1 -> used for reassigning the asset to another busbar | ------|----------- busbar 2 - type: b | | / | type: DS - SR Switch busbar 2 -> used for reassigning the asset to another busbar | | --------- bus_3 - type: n - busbar section bus | / type: CB - DV Circuit Breaker / Power Switch -> used for disconnecting the asset from the busbar | --------- bus_2 - type: n - circuit breaker bus | / type: DS - SL Switch (optional) -> not used by the asset | --------- bus_1 - type: n - asset bus ^ | Line/Transformer

sl_switch_grid_model_id class-attribute instance-attribute #

sl_switch_grid_model_id = None

The id of the switch, which connects the asset to the circuit breaker node. This switch is a disconnector switch. Do not use for anything, leave state as found. Default should be closed.

dv_switch_grid_model_id instance-attribute #

dv_switch_grid_model_id

This switch is a circuit breaker / power switch. Use for disconnecting / reconnecting the asset from the busbar.

sr_switch_grid_model_id instance-attribute #

sr_switch_grid_model_id

The ids of the switches, which assign the asset to the busbars. key: busbar_grid_model_id e.g. 4%%bus value: sr_switch_grid_model_id This switch is a disconnector switch. Use for reassigning the asset to another busbar. Only one switch should be closed at a time.

check_is_empty classmethod #

check_is_empty(v)

Check if the dict is empty.

PARAMETER DESCRIPTION
v

The dictionary of sr_switch_grid_model_id to check.

TYPE: dict[str, str]

RETURNS DESCRIPTION
dict[str, str]

The dictionary itself.

RAISES DESCRIPTION
ValueError

If the dictionary is empty.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@field_validator("sr_switch_grid_model_id")
@classmethod
def check_is_empty(cls, v: dict[str, str]) -> dict[str, str]:
    """Reject an empty sr_switch_grid_model_id mapping.

    Parameters
    ----------
    v : dict[str, str]
        The dictionary of sr_switch_grid_model_id to check.

    Returns
    -------
    dict[str, str]
        The dictionary itself, unchanged.

    Raises
    ------
    ValueError
        If the dictionary is empty.
    """
    # An asset bay must reference at least one busbar selector switch.
    if not v:
        raise ValueError("sr_switch_grid_model_id must not be empty")
    return v

BusbarCoupler #

Bases: BaseModel

Coupler data describing a single coupler at a station.

This references only busbar couplers, i.e. couplers connecting two busbars. Switches connecting assets to a busbar are represented in the asset_switching_table in the station model.

Note: A busbar coupler is a physical connection between two busbars; this can also be a cross coupler. To further specify the connection of an asset to a busbar, the asset bay (see AssetBay) is used.

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the coupler. Corresponds to the coupler's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the coupler, might be useful for finding the coupler later on

name class-attribute instance-attribute #

name = None

The name of the coupler, might be useful for finding the coupler later on

busbar_from_id instance-attribute #

busbar_from_id

Is used to determine where the coupler is connected to the busbars on the "from" side. Refers to the int_id of the busbar

busbar_to_id instance-attribute #

busbar_to_id

Is used to determine where the coupler is connected to the busbars on the "to" side. Refers to the int_id of the busbar

open instance-attribute #

open

The status of the coupler. True if the coupler is open, False if the coupler is closed. TODO: Switch to using the connectivity table instead of this field.

in_service class-attribute instance-attribute #

in_service = True

Whether the coupler is in-service. Out-of-service couplers are assumed to be always open

asset_bay class-attribute instance-attribute #

asset_bay = None

The asset bay (Schaltfeld) of the coupler. Note: A coupler can have multiple from and to busbars. The asset bay sr_switch_grid_model_id is used to save the selector switches of the coupler. Note: A coupler never has a sl_switch_grid_model_id; the dv_switch_grid_model_id should be the same as the name of the coupler.

SwitchableAsset #

Bases: BaseModel

Asset data describing a single asset at a station.

An asset can be for instance a transformer, line, generator, load, shunt. Note: An asset can be connected to multiple busbars through the switching grid, however if this happens a closed coupler between these busbars is assumed. If such couplers are not present, they will be created. Note: An asset that is out-of-service can be represented, but its switching entries will be ignored.

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the asset. Corresponds to the asset's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the asset. These refer loosely to the types in the pandapower/powsybl grid models. If set, this can be used to disambiguate branches from injections

name class-attribute instance-attribute #

name = None

The name of the asset, might be useful for finding the asset later on

in_service class-attribute instance-attribute #

in_service = True

If the element is in service. False means the switching entry for this element will be ignored. This shall not be used for elements intentionally disconnected, instead set all zeros in the switching table.

branch_end class-attribute instance-attribute #

branch_end = None

If the asset was a branch, this can store which end of the branch was connected to the station in the original grid model. This can take the values "from", "to", "hv", "mv", "lv", where from/to works for lines and hv/mv/lv works for transformers. This should only be set if this is needed for the postprocessing, in theory a branch should be identifiable by the branch id and the station id. Injection-type assets like generators and loads should not have this set.

asset_bay class-attribute instance-attribute #

asset_bay = None

The asset bay (Schaltfeld) of the asset. The connection path is used to determine the physical connection of the asset to the busbar. None of these switches will be found in the network model, they are only used for the asset topology.

is_branch #

is_branch()

Return True if the asset is a branch.

Only works if the type is set. If type is not set this will return None.

RETURNS DESCRIPTION
bool

True if the asset is a branch, False if it is an injection.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
def is_branch(self) -> Optional[bool]:
    """Determine whether this asset is a branch-type element.

    A decision is only possible when the ``type`` field is set; otherwise
    the question cannot be answered and ``None`` is returned.

    Returns
    -------
    bool
        True if the asset is a branch, False if it is an injection.
    """
    if self.type is None:
        return None
    branch_types = get_args(AssetBranchType)
    return self.type in branch_types

AssetSetpoint #

Bases: BaseModel

Asset data describing a single asset with a setpoint.

This could for example be a PST or HVDC setpoint. Note: The same asset can both be switchable and have a setpoint. In this case, the asset will be represented twice.

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the asset. Corresponds to the asset's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the asset, might be useful for finding the asset later on

name class-attribute instance-attribute #

name = None

The name of the asset, might be useful for finding the asset later on

setpoint instance-attribute #

setpoint

The setpoint of the asset.

Station #

Bases: BaseModel

Station data describing a single station.

The minimal station model refers to a single bus-branch model bus_id, which contains a splittable bus. A physical representation may have multiple bus-branch model bus_ids.

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the station. Corresponds to the station's id in the grid model. Expects the bus-branch model bus_id, which is the most splittable bus_id.

name class-attribute instance-attribute #

name = None

The name of the station.

type class-attribute instance-attribute #

type = None

The type of the station.

region class-attribute instance-attribute #

region = None

The region of the station.

voltage_level class-attribute instance-attribute #

voltage_level = None

The voltage level of the station.

busbars instance-attribute #

busbars

The list of busbars at the station. The order of this list is the same order as the busbars in the switching table.

couplers instance-attribute #

couplers

The list of couplers at the station.

assets instance-attribute #

assets

The list of assets at the station. The order of this list is the same order as the assets in the asset_switching_table.

asset_switching_table instance-attribute #

asset_switching_table

Holds the switching of each asset to each busbar, shape (n_bus, n_asset).

An entry is true if the asset is connected to the busbar. Note: An asset can be connected to multiple busbars, in which case a closed coupler is assumed to be present between these busbars Note: An asset can be connected to none of the busbars. In this case, the asset is intentionally disconnected as part of a transmission line switching action. In practice, this usually involves a separate switch from the asset-to-busbar couplers, as each asset usually has a switch that completely disconnects it from the station. These switches are not modelled here, a postprocessing routine needs to do the translation to this physical layout. Do not use in_service for intentional disconnections.

asset_connectivity class-attribute instance-attribute #

asset_connectivity = None

Holds all possible layouts of the asset_switching_table, shape (n_bus, n_asset).

An entry is true if it is possible to connect an asset to the busbar. If None, it is assumed that all branches can be connected to all busbars.

model_log class-attribute instance-attribute #

model_log = None

Holds log messages from the model creation process.

This can be used to store information about the model creation process, e.g. warnings or errors. A potential use case is to inform the user about data quality issues e.g. missing the Asset Bay switches.

check_int_id_unique classmethod #

check_int_id_unique(v)

Check if int_id is unique for all busbars.

PARAMETER DESCRIPTION
v

The list of busbars to check.

TYPE: list[Busbar]

RETURNS DESCRIPTION
list[Busbar]

The list of busbars.

RAISES DESCRIPTION
ValueError

If int_id is not unique for all busbars.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@field_validator("busbars")
@classmethod
def check_int_id_unique(cls, v: list[Busbar]) -> list[Busbar]:
    """Ensure every busbar carries a distinct int_id.

    Parameters
    ----------
    v : list[Busbar]
        The list of busbars to check.

    Returns
    -------
    list[Busbar]
        The validated busbars, unchanged.

    Raises
    ------
    ValueError
        If int_id is not unique for all busbars.
    """
    seen: set = set()
    for busbar in v:
        if busbar.int_id in seen:
            raise ValueError("int_id must be unique for busbars")
        seen.add(busbar.int_id)
    return v

check_coupler_busbars_different classmethod #

check_coupler_busbars_different(v)

Check if busbar_from_id and busbar_to_id are different for all couplers.

PARAMETER DESCRIPTION
v

The list of couplers to check.

TYPE: list[BusbarCoupler]

RETURNS DESCRIPTION
list[BusbarCoupler]

The list of couplers.

RAISES DESCRIPTION
ValueError

If busbar_from_id and busbar_to_id are the same for any coupler.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@field_validator("couplers")
@classmethod
def check_coupler_busbars_different(cls, v: list[BusbarCoupler]) -> list[BusbarCoupler]:
    """Reject couplers whose two ends reference the same busbar.

    Parameters
    ----------
    v : list[BusbarCoupler]
        The list of couplers to check.

    Returns
    -------
    list[BusbarCoupler]
        The validated couplers, unchanged.

    Raises
    ------
    ValueError
        If busbar_from_id and busbar_to_id are the same for any coupler.
    """
    # Find the first self-looping coupler, if any.
    offender = next((c for c in v if c.busbar_from_id == c.busbar_to_id), None)
    if offender is not None:
        raise ValueError(f"busbar_from_id and busbar_to_id must be different for coupler {offender.grid_model_id}")
    return v

check_busbar_exists #

check_busbar_exists()

Check if all busbars in couplers exist in the busbars list.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_busbar_exists(self: "Station") -> "Station":
    """Verify that every coupler end references a busbar known to this station."""
    # Set lookup instead of a list scan per coupler end.
    valid_ids = {busbar.int_id for busbar in self.busbars}
    for coupler in self.couplers:
        if coupler.busbar_from_id not in valid_ids:
            raise ValueError(
                f"busbar_from_id {coupler.busbar_from_id} in coupler {coupler.grid_model_id} does not exist in busbars."
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
        if coupler.busbar_to_id not in valid_ids:
            raise ValueError(
                f"busbar_to_id {coupler.busbar_to_id} in coupler {coupler.grid_model_id} does not exist in busbars"
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
    return self

check_coupler_references #

check_coupler_references()

Check if all closed couplers reference in-service busbars.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_coupler_references(self: "Station") -> "Station":
    """Check that no closed, in-service coupler bridges busbars of differing service state.

    Open or out-of-service couplers are skipped. A closed coupler connecting an
    out-of-service busbar to an in-service busbar is inconsistent and rejected;
    two busbars with the same service state (both in or both out) are accepted.
    """
    busbar_state_map = {busbar.int_id: busbar.in_service for busbar in self.busbars}
    for coupler in self.couplers:
        if coupler.open or not coupler.in_service:
            continue
        if busbar_state_map[coupler.busbar_from_id] != busbar_state_map[coupler.busbar_to_id]:
            raise ValueError(
                f"Closed coupler {coupler.grid_model_id} connects out-of-service busbar with in-service busbar."
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
    return self

check_asset_switching_table_shape #

check_asset_switching_table_shape()

Check if the switching table shape matches the busbars and assets.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_switching_table_shape(self: "Station") -> "Station":
    """Validate that both asset tables have shape (n_busbars, n_assets)."""
    expected_shape = (len(self.busbars), len(self.assets))

    if self.asset_switching_table.shape != expected_shape:
        raise ValueError(
            f"asset_switching_table shape {self.asset_switching_table.shape} does not match busbars "
            f"{len(self.busbars)} and assets {len(self.assets)}"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    # asset_connectivity is optional; only check its shape when present.
    if self.asset_connectivity is not None and self.asset_connectivity.shape != expected_shape:
        raise ValueError(
            f"asset_connectivity shape {self.asset_connectivity.shape} does not match busbars "
            f"{len(self.busbars)} and assets {len(self.assets)}"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    return self

check_asset_switching_table_current_vs_physical #

check_asset_switching_table_current_vs_physical()

Check all current assignments are physically allowed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_switching_table_current_vs_physical(self: "Station") -> "Station":
    """Ensure no asset is currently assigned to a busbar its connectivity table forbids."""
    # Without a connectivity table every assignment is considered allowed.
    if self.asset_connectivity is None:
        return self

    physically_forbidden = np.logical_not(self.asset_connectivity)
    if np.logical_and(self.asset_switching_table, physically_forbidden).any():
        raise ValueError(
            f"Not all current assignments are physically allowed Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    return self

check_asset_bay #

check_asset_bay()

Check if the asset bay bus is in busbars.

RETURNS DESCRIPTION
Station

The station itself.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_bay(self: "Station") -> "Station":
    """Check that every asset-bay selector switch references a known busbar.

    Returns
    -------
    Station
        The station itself.
    """
    known_busbars = [busbar.grid_model_id for busbar in self.busbars]
    # Only assets that actually carry an asset bay need checking.
    assets_with_bay = (asset for asset in self.assets if asset.asset_bay is not None)
    for asset in assets_with_bay:
        for busbar_id in asset.asset_bay.sr_switch_grid_model_id:
            if busbar_id not in known_busbars:
                raise ValueError(
                    f"busbar_id {busbar_id} in asset {asset.grid_model_id} does not exist in busbars"
                    f" Station_id: {self.grid_model_id}, Name: {self.name}"
                )

    return self

check_bus_id #

check_bus_id()

Check if station grid_model_id is in the busbar.bus_branch_bus_id.

RETURNS DESCRIPTION
Station

The station itself.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_bus_id(self: "Station") -> "Station":
    """Check if station grid_model_id is in the busbar.bus_branch_bus_id.

    The check is skipped entirely when no busbar declares a bus_branch_bus_id.

    Returns
    -------
    Station
        The station itself.
    """
    declared_bus_ids = {busbar.bus_branch_bus_id for busbar in self.busbars if busbar.bus_branch_bus_id is not None}
    if declared_bus_ids and self.grid_model_id not in declared_bus_ids:
        raise ValueError(
            f"Station grid_model_id {self.grid_model_id} does not exist in busbars bus_branch_bus_id"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    return self

__eq__ #

__eq__(other)

Check if two stations are equal.

PARAMETER DESCRIPTION
other

The other station to compare to.

TYPE: object

RETURNS DESCRIPTION
bool

True if the stations are equal, False otherwise.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
def __eq__(self, other: object) -> bool:
    """Check if two stations are equal.

    Equality considers grid_model_id, region, busbars, couplers, assets and
    both asset tables; other fields (e.g. name, type, model_log) are ignored.

    Parameters
    ----------
    other : object
        The other station to compare to.

    Returns
    -------
    bool
        True if the stations are equal, False otherwise.
    """
    if not isinstance(other, Station):
        return False
    # Handle the optional connectivity tables explicitly: when exactly one side
    # is None, the old `array == None` comparison produced an elementwise
    # ndarray instead of a bool, making __eq__ return a non-bool value.
    if (self.asset_connectivity is None) != (other.asset_connectivity is None):
        return False
    connectivity_equal = self.asset_connectivity is None or np.array_equal(
        self.asset_connectivity, other.asset_connectivity
    )
    return (
        self.grid_model_id == other.grid_model_id
        and self.region == other.region
        and self.busbars == other.busbars
        and self.couplers == other.couplers
        and self.assets == other.assets
        and np.array_equal(self.asset_switching_table, other.asset_switching_table)
        and connectivity_equal
    )

Topology #

Bases: BaseModel

Topology data describing a single timestep topology.

A topology includes switchings for substations and potentially asset setpoints.

topology_id instance-attribute #

topology_id

The unique identifier of the topology.

grid_model_file class-attribute instance-attribute #

grid_model_file = None

The grid model file that represents this timestep. Note that relevant folders might only work on the machine they have been created, so some sort of permanent storage server should be used to keep these files globally accessible

name class-attribute instance-attribute #

name = None

The name of the topology.

stations instance-attribute #

stations

The list of stations in the topology.

asset_setpoints class-attribute instance-attribute #

asset_setpoints = None

The list of asset setpoints in the topology.

timestamp instance-attribute #

timestamp

The timestamp which is represented by this topology during the original optimization. I.e. if this timestep was the 5 o clock timestep on the day that was optimized, then this timestamp would read 5 o clock.

metrics class-attribute instance-attribute #

metrics = None

The metrics of the topology.

Strategy #

Bases: BaseModel

Timestep data describing a collection of single timesteps, each represented by a Topology.

strategy_id instance-attribute #

strategy_id

The unique identifier of the strategy.

timesteps instance-attribute #

timesteps

The list of topologies, one for every timestep.

name class-attribute instance-attribute #

name = None

The name of the strategy.

author class-attribute instance-attribute #

author = None

The author of the strategy, i.e. who has created it.

process_type class-attribute instance-attribute #

process_type = None

The process type that created this topology, e.g. DC-solver, DC+-solver, Human etc.

process_parameters class-attribute instance-attribute #

process_parameters = None

The process parameters that were used to create this topology.

date_of_creation class-attribute instance-attribute #

date_of_creation = None

The date of creation of this strategy, i.e. when the optimization ran.

metadata class-attribute instance-attribute #

metadata = None

Additional metadata that might be useful for the strategy.

RealizedStation #

Bases: BaseModel

A realized station, including the new station and the changes made to the original station

station instance-attribute #

station

The realized asset station object

coupler_diff instance-attribute #

coupler_diff

A list of couplers that have been switched.

reassignment_diff instance-attribute #

reassignment_diff

A list of reassignments that have been made. Each tuple contains the asset index that was affected (not the asset grid_model_id but the index into the asset_switching_table), the busbar index (again the index into the switching table) and whether the asset was connected (True) or disconnected (False) to that busbar.

disconnection_diff instance-attribute #

disconnection_diff

A list of disconnections that have been made. Each tuple contains the asset index that was disconnected.

RealizedTopology #

Bases: BaseModel

A realized topology, including the new topology and the changes made to the original topology.

This is similar to RealizedStation but holds information for all stations in the topology. The diffs include a station identifier that shows which station in the topology was affected by the diff.

topology instance-attribute #

topology

The realized asset topology object

coupler_diff instance-attribute #

coupler_diff

A list of couplers that have been switched. Each tuple contains the station grid_model_id and the coupler that was switched.

reassignment_diff instance-attribute #

reassignment_diff

A list of reassignments that have been made. Each tuple contains the station grid_model_id, the asset index that was affected (not the asset grid_model_id but the index into the asset_switching_table), the busbar index (again the index into the switching table) and whether the asset was connected (True) or disconnected (False) to that busbar.

disconnection_diff instance-attribute #

disconnection_diff

A list of disconnections that have been made. Each tuple contains the station grid_model_id and the asset index that was disconnected. This can also include non-relevant stations.

Asset Topology Helper#

toop_engine_interfaces.asset_topology_helpers #

Collects some common helper functions for asset topology manipulation.

electrical_components #

electrical_components(station, min_num_assets=1)

Compute the electrical components of a station.

A set of busbars is considered a separate electrical component if it is not connected through a closed coupler to other busbars and there are at least min_num_assets assets connected to the component.

PARAMETER DESCRIPTION
station

The station object to analyze.

TYPE: Station

min_num_assets

The minimum number of assets connected to a component to be considered a valid component, by default 1

TYPE: int DEFAULT: 1

RETURNS DESCRIPTION
list[list[int]]

A list of lists, where each inner list contains the indices of the busbars in the component indexing into the list of all busbars in the station.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def electrical_components(station: Station, min_num_assets: int = 1) -> list[list[int]]:
    """Compute the electrical components of a station.

    Busbars joined by closed couplers form one electrical component. A component
    is only reported if at least ``min_num_assets`` assets are connected to its
    busbars in total. (The previous docstring hard-coded "two assets", which only
    describes the non-default ``min_num_assets=2``.)

    Parameters
    ----------
    station : Station
        The station object to analyze.
    min_num_assets : int, optional
        The minimum number of assets connected to a component to be considered a valid component, by default 1

    Returns
    -------
    list[list[int]]
        A list of lists, where each inner list contains the indices of the busbars in the component indexing into the list
        of all busbars in the station.
    """
    # Number of assets wired to each busbar; used below as the component-size criterion.
    n_connections_per_bus = station.asset_switching_table.sum(axis=1)

    # Maps a busbar int_id to its positional index in station.busbars.
    int_id_mapper = {busbar.int_id: i for i, busbar in enumerate(station.busbars)}

    graph = nx.Graph()
    graph.add_nodes_from(
        [(busbar.int_id, {"degree": degree}) for busbar, degree in zip(station.busbars, n_connections_per_bus, strict=True)]
    )
    # Only closed couplers join busbars electrically.
    graph.add_edges_from(
        [(coupler.busbar_from_id, coupler.busbar_to_id) for coupler in station.couplers if not coupler.open]
    )

    components = nx.connected_components(graph)
    # Drop components with fewer than min_num_assets connected assets in total.
    components = [
        list(component)
        for component in components
        if sum(graph.nodes[busbar]["degree"] for busbar in component) >= min_num_assets
    ]
    # Translate busbar int_ids into positional indices.
    components = [[int_id_mapper[busbar] for busbar in component] for component in components]

    return components

number_of_splits #

number_of_splits(station)

Compute the number of electrical components that are present in a station.

A set of busbars is considered a separate electrical component if it is not connected through a closed coupler to other busbars and there are at least two assets connected to the component.

PARAMETER DESCRIPTION
station

The station object to analyze.

TYPE: Station

RETURNS DESCRIPTION
int

The number of electrical components in the station.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def number_of_splits(station: Station) -> int:
    """Compute the number of electrical components that are present in a station.

    Out-of-service elements are removed first. A set of busbars counts as a
    separate electrical component if it is isolated from the other busbars
    (no closed coupler) and at least two assets are connected to it.

    Parameters
    ----------
    station : Station
        The station object to analyze.

    Returns
    -------
    int
        The number of electrical components in the station.
    """
    in_service_station = filter_out_of_service(station)
    return len(electrical_components(in_service_station, min_num_assets=2))

remove_busbar #

remove_busbar(station, grid_model_id)

Remove a busbar with a specific grid_model_id from the station.

This will - remove the busbar from the list of busbars - remove all couplers that are connected to the busbar at either end - remove all asset bay entries that are connected to the busbar - remove the line from the asset switching table - remove the line from the asset connectivity table

PARAMETER DESCRIPTION
station

The station object to modify.

TYPE: Station

grid_model_id

The grid_model_id of the busbar to remove.

TYPE: str

RETURNS DESCRIPTION
Station

The modified station object with the busbar removed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def remove_busbar(station: Station, grid_model_id: str) -> Station:
    """Remove a busbar with a specific grid_model_id from the station.

    Removing a busbar also drops:
    - every coupler touching the busbar at either end
    - every asset-bay selector-switch entry keyed by the busbar
    - the busbar's row in the asset switching table
    - the busbar's row in the asset connectivity table (if present)

    Parameters
    ----------
    station : Station
        The station object to modify.
    grid_model_id : str
        The grid_model_id of the busbar to remove.

    Returns
    -------
    Station
        The modified station object with the busbar removed.
    """
    # Locate the busbar once; both its row position and its int_id are needed.
    row = [b.grid_model_id for b in station.busbars].index(grid_model_id)
    removed_int_id = station.busbars[row].int_id

    remaining_busbars = [b for b in station.busbars if b.grid_model_id != grid_model_id]
    remaining_couplers = [c for c in station.couplers if removed_int_id not in (c.busbar_from_id, c.busbar_to_id)]
    switching_table = np.delete(station.asset_switching_table, row, axis=0)
    connectivity = None
    if station.asset_connectivity is not None:
        connectivity = np.delete(station.asset_connectivity, row, axis=0)

    def drop_sr_entry(asset: SwitchableAsset) -> SwitchableAsset:
        """Strip the selector-switch entry keyed by the removed busbar, if any."""
        if asset.asset_bay is None:
            return asset
        kept_switches = {
            busbar_id: foreign_id
            for busbar_id, foreign_id in asset.asset_bay.sr_switch_grid_model_id.items()
            if busbar_id != grid_model_id
        }
        new_bay = asset.asset_bay.model_copy(update={"sr_switch_grid_model_id": kept_switches})
        return asset.model_copy(update={"asset_bay": new_bay})

    # Assemble a fresh station with all busbar references purged.
    return station.model_copy(
        update={
            "busbars": remaining_busbars,
            "couplers": remaining_couplers,
            "assets": [drop_sr_entry(a) for a in station.assets],
            "asset_switching_table": switching_table,
            "asset_connectivity": connectivity,
        }
    )

filter_out_of_service_assets #

filter_out_of_service_assets(station)

Filter out-of-service assets from the station.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with all out-of-service assets removed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_out_of_service_assets(station: Station) -> Station:
    """Filter out-of-service assets from the station.

    Parameters
    ----------
    station : Station
        The station object to filter.

    Returns
    -------
    Station
        The new station object with all out-of-service assets removed.
    """
    keep_mask = [asset.in_service for asset in station.assets]
    if all(keep_mask):
        # Nothing to drop; hand back the original instance untouched.
        return station

    new_connectivity = None
    if station.asset_connectivity is not None:
        new_connectivity = station.asset_connectivity[:, keep_mask]

    return station.model_copy(
        update={
            "assets": [asset for asset, keep in zip(station.assets, keep_mask) if keep],
            "asset_switching_table": station.asset_switching_table[:, keep_mask],
            "asset_connectivity": new_connectivity,
        }
    )

filter_out_of_service_busbars #

filter_out_of_service_busbars(station)

Filter out-of-service busbars from the station.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with all out-of-service busbars removed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_out_of_service_busbars(station: Station) -> Station:
    """Filter out-of-service busbars from the station.

    Parameters
    ----------
    station : Station
        The station object to filter.

    Returns
    -------
    Station
        The new station object with all out-of-service busbars removed.
    """
    # Collect the ids up front: remove_busbar returns a fresh station each
    # pass, so we must not derive the list from the object we are replacing.
    ids_to_remove = [busbar.grid_model_id for busbar in station.busbars if not busbar.in_service]

    for busbar_id in ids_to_remove:
        station = remove_busbar(station, busbar_id)

    return station

filter_out_of_service_couplers #

filter_out_of_service_couplers(station)

Filter out-of-service couplers from the station.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with all out-of-service couplers removed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_out_of_service_couplers(station: Station) -> Station:
    """Filter out-of-service couplers from the station.

    Parameters
    ----------
    station : Station
        The station object to filter.

    Returns
    -------
    Station
        The new station object with all out-of-service couplers removed.
    """
    in_service_couplers = [coupler for coupler in station.couplers if coupler.in_service]
    if len(in_service_couplers) == len(station.couplers):
        # Every coupler is in service; keep the original instance.
        return station

    return station.model_copy(update={"couplers": in_service_couplers})

filter_out_of_service #

filter_out_of_service(station)

Filter out-of-service assets, busbars and couplers from the station.

The return value will be a new station object with all out-of-service elements removed. Note that the busbars are not reindexed, so the busbar ids will be the same as in the original station with missing elements. If you expect a continuous range of busbar ids, you should call reindex_busbars after this function.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with all out-of-service assets removed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_out_of_service(station: Station) -> Station:
    """Filter out-of-service assets, busbars and couplers from the station.

    The return value is a new station object with all out-of-service elements
    removed. Busbars are not reindexed: their ids keep the values from the
    original station, leaving gaps where elements were dropped. Call
    reindex_busbars afterwards if a continuous range of busbar ids is expected.

    Parameters
    ----------
    station : Station
        The station object to filter.

    Returns
    -------
    Station
        The new station object with all out-of-service assets removed.
    """
    filtered = filter_out_of_service_couplers(station)
    filtered = filter_out_of_service_assets(filtered)
    filtered = filter_out_of_service_busbars(filtered)

    # Re-run the model validators on the reduced station.
    Station.model_validate(filtered)

    return filtered

filter_duplicate_couplers #

filter_duplicate_couplers(
    station, retain_type_hierarchy=None
)

Filter out duplicate couplers

Two couplers are considered duplicates if they connect the same busbars, regardless of their order. If a duplicate coupler is found, only the first one is kept.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

retain_type_hierarchy

If provided, not the first coupler is kept but the one with the highest type in the hierarchy list. Highest means that the type is the first in the list. If not provided, the first coupler is kept, by default None

TYPE: Optional[list[str]] DEFAULT: None

RETURNS DESCRIPTION
Station

The new station object with duplicate couplers removed.

list[BusbarCoupler]

The list of removed couplers.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_duplicate_couplers(
    station: Station, retain_type_hierarchy: Optional[list[str]] = None
) -> tuple[Station, list[BusbarCoupler]]:
    """Filter out duplicate couplers

    Two couplers are considered duplicates if they connect the same busbars, regardless of their
    order. If a duplicate coupler is found, only the first one is kept.

    Parameters
    ----------
    station : Station
        The station object to filter.
    retain_type_hierarchy : Optional[list[str]], optional
        If provided, not the first coupler is kept but the one with the highest type in the hierarchy list. Highest means
        that the type is the first in the list. If not provided, the first coupler is kept, by default None

    Returns
    -------
    Station
        The new station object with duplicate couplers removed.
    list[BusbarCoupler]
        The list of removed couplers.
    """
    # Group coupler indices by their normalized (low, high) busbar pair so that
    # the connection direction does not matter. Dict insertion order preserves
    # the order in which each pair first appears in station.couplers.
    coupler_groups: dict[tuple[int, int], list[int]] = {}
    for index, coupler in enumerate(station.couplers):
        coupler_repr = (
            min(coupler.busbar_from_id, coupler.busbar_to_id),
            max(coupler.busbar_from_id, coupler.busbar_to_id),
        )
        # setdefault + append avoids rebuilding the index list on every hit
        coupler_groups.setdefault(coupler_repr, []).append(index)

    kept_couplers = []
    removed_couplers = []
    for indices in coupler_groups.values():
        # If there is only one or we don't have to sort, keep the first and remove the rest
        if len(indices) == 1 or retain_type_hierarchy is None:
            sorted_couplers = [station.couplers[i] for i in indices]
        # We have to sort by type hierarchy
        else:
            # Sort the couplers by their position in the hierarchy; types not
            # listed in the hierarchy sort to the end
            sorted_couplers = sorted(
                (station.couplers[i] for i in indices),
                key=lambda c: retain_type_hierarchy.index(c.type)
                if c.type in retain_type_hierarchy
                else len(retain_type_hierarchy),
            )
        # Keep the first coupler and remove the others
        kept_couplers.append(sorted_couplers[0])
        removed_couplers.extend(sorted_couplers[1:])

    if len(removed_couplers) == 0:
        # Nothing was filtered: hand back the original object untouched
        return station, removed_couplers

    return (
        station.model_copy(update={"couplers": kept_couplers}),
        removed_couplers,
    )

filter_disconnected_busbars #

filter_disconnected_busbars(
    station, respect_coupler_open=False
)

Remove busbars that can not get connected by any coupler.

This creates a graph of the busbars and couplers and returns only the largest connected component. The size of a component is determined by the number of assets connected to it. Open and closed couplers are treated the same if respect_coupler_open is False, i.e. a busbar connected by only open couplers is considered connected. Busbars connected by out-of-service couplers are always considered disconnected.

This also means that elements which are only connected to the disconnected busbars are effectively disconnected, and it looks like they are subject to transmission line switching.

Note that this function does not reindex the busbars, so the busbar ids will be the same as in the original station with missing elements. If you expect a continuous range of busbar ids, you should call reindex_busbars after this function.

PARAMETER DESCRIPTION
station

The station object potentially with disconnected busbars.

TYPE: Station

respect_coupler_open

If True, only closed couplers are considered connected, if False, all couplers are considered connected, by default False

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Station

The new station object with disconnected busbars removed.

list[Busbar]

The list of removed busbars.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_disconnected_busbars(station: Station, respect_coupler_open: bool = False) -> tuple[Station, list[Busbar]]:
    """Remove busbars that can not get connected by any coupler.

    Builds a graph with busbars as nodes and couplers as edges and keeps only
    the connected component serving the most assets. Unless respect_coupler_open
    is True, open and closed couplers count the same, i.e. a busbar reachable
    only through open couplers is still considered connected. Out-of-service
    couplers never connect anything.

    Elements attached solely to removed busbars end up effectively
    disconnected, which can look like transmission line switching downstream.

    Busbar int-ids are not reindexed; call reindex_busbars afterwards if a
    contiguous id range is required.

    Parameters
    ----------
    station : Station
        The station object potentially with disconnected busbars.
    respect_coupler_open : bool, optional
        If True, only closed couplers are considered connected, if False, all couplers are
        considered connected, by default False

    Returns
    -------
    Station
        The new station object with disconnected busbars removed.
    list[Busbar]
        The list of removed busbars.
    """
    # Only in-service couplers count; optionally also require them to be closed
    active_couplers = [
        c for c in station.couplers if c.in_service and (not respect_coupler_open or not c.open)
    ]

    connectivity = nx.Graph()
    assets_per_busbar = station.asset_switching_table.sum(axis=1)
    connectivity.add_nodes_from(
        (busbar.int_id, {"num_assets": count})
        for busbar, count in zip(station.busbars, assets_per_busbar, strict=True)
    )
    connectivity.add_edges_from((c.busbar_from_id, c.busbar_to_id) for c in active_couplers)

    components = list(nx.connected_components(connectivity))
    if len(components) == 1:
        return station, []

    # Keep the component that serves the largest number of assets
    main_component = max(
        components,
        key=lambda comp: sum(connectivity.nodes[node]["num_assets"] for node in comp),
    )

    dropped = [busbar for busbar in station.busbars if busbar.int_id not in main_component]
    for busbar in dropped:
        station = remove_busbar(station, busbar.grid_model_id)

    return station, dropped

reindex_busbars #

reindex_busbars(station)

Reindex the int-ids of the busbars in the station

This might be necessary after filter_disconnected_busbars or filter_out_of_service have been called.

PARAMETER DESCRIPTION
station

The station object with possible inconsistent busbar ids.

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with reindexed busbars, where int-ids are continuous and start at 0.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def reindex_busbars(station: Station) -> Station:
    """Renumber the busbar int-ids so they are contiguous and start at 0.

    This might be necessary after filter_disconnected_busbars or
    filter_out_of_service have been called, since those can leave gaps in the
    id range.

    Parameters
    ----------
    station : Station
        The station object with possible inconsistent busbar ids.

    Returns
    -------
    Station
        The new station object with reindexed busbars, where int-ids are continuous and start at 0.
    """
    # The new id of each busbar is simply its position in the busbar list
    old_to_new = {}
    reindexed_busbars = []
    for position, busbar in enumerate(station.busbars):
        old_to_new[busbar.int_id] = position
        reindexed_busbars.append(busbar.model_copy(update={"int_id": position}))

    # Rewire every coupler to the renumbered busbar ids
    reindexed_couplers = []
    for coupler in station.couplers:
        reindexed_couplers.append(
            coupler.model_copy(
                update={
                    "busbar_from_id": old_to_new[coupler.busbar_from_id],
                    "busbar_to_id": old_to_new[coupler.busbar_to_id],
                }
            )
        )

    station = station.model_copy(update={"busbars": reindexed_busbars, "couplers": reindexed_couplers})
    Station.model_validate(station)
    return station

filter_assets_by_type #

filter_assets_by_type(
    station, assets_allowed, allow_none_type=False
)

Filter assets by type

Removes all assets that have a type which is not in the set of allowed types.

PARAMETER DESCRIPTION
station

The station object to filter.

TYPE: Station

assets_allowed

The set of asset types that are allowed.

TYPE: set[str]

allow_none_type

If True, assets without a type are allowed, by default False

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Station

The new station object with assets of the wrong type removed.

list[SwitchableAsset]

The list of removed assets.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def filter_assets_by_type(
    station: Station, assets_allowed: set[str], allow_none_type: bool = False
) -> tuple[Station, list[SwitchableAsset]]:
    """Filter assets by type

    Drops every asset whose type is not contained in the set of allowed types.

    Parameters
    ----------
    station : Station
        The station object to filter.
    assets_allowed : set[str]
        The set of asset types that are allowed.
    allow_none_type : bool, optional
        If True, assets without a type are allowed, by default False

    Returns
    -------
    Station
        The new station object with assets of the wrong type removed.
    list[SwitchableAsset]
        The list of removed assets.
    """
    # Partition assets into kept/removed in a single pass while remembering the
    # boolean mask, so the numpy tables can be column-filtered consistently.
    keep_mask = []
    kept_assets = []
    removed_assets = []
    for asset in station.assets:
        keep = (asset.type in assets_allowed) or (allow_none_type and asset.type is None)
        keep_mask.append(keep)
        if keep:
            kept_assets.append(asset)
        else:
            removed_assets.append(asset)

    if not removed_assets:
        # No asset was filtered: hand back the original object
        return station, []

    filtered_connectivity = None
    if station.asset_connectivity is not None:
        filtered_connectivity = station.asset_connectivity[:, keep_mask]

    new_station = station.model_copy(
        update={
            "assets": kept_assets,
            "asset_switching_table": station.asset_switching_table[:, keep_mask],
            "asset_connectivity": filtered_connectivity,
        }
    )
    return new_station, removed_assets

find_multi_connected_without_coupler #

find_multi_connected_without_coupler(station)

Find assets bridging multiple busbars without a coupler in between

These cases can cause a bug in the downstream functions

PARAMETER DESCRIPTION
station

The station object to check.

TYPE: Station

RETURNS DESCRIPTION
list[tuple[int, int, int]]

A list of tuples containing the index of the multi-connected asset and the indices of the two busbars it is bridging. Only returns bridges that don't have a coupler in between. The first busbar index will always be lower than the second, hence a routine which always removes the first doesn't run into double removals if an asset appears multiple times.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def find_multi_connected_without_coupler(
    station: Station,
) -> list[tuple[int, int, int]]:
    """Find assets bridging multiple busbars without a coupler in between

    These cases can cause a bug in the downstream functions

    Parameters
    ----------
    station : Station
        The station object to check.

    Returns
    -------
    list[tuple[int, int, int]]
        A list of tuples containing the index of the multi-connected asset and the indices of the
        two busbars it is bridging.
        Only returns bridges that don't have a coupler in between.
        The first busbar index will always be lower than the second, hence a routine which always
        removes the first doesn't run into double removals if an asset appears multiple times.
    """
    # Precompute the set of normalized (low int_id, high int_id) pairs that
    # have a coupler, so each bridged pair is checked in O(1) instead of
    # scanning the full coupler list per pair.
    coupled_pairs = {
        (
            min(coupler.busbar_from_id, coupler.busbar_to_id),
            max(coupler.busbar_from_id, coupler.busbar_to_id),
        )
        for coupler in station.couplers
    }

    multi_connected_without_coupler = []
    # Only assets connected to more than one busbar can bridge anything
    for asset_idx in np.flatnonzero(station.asset_switching_table.sum(axis=0) > 1):
        busbars_bridged = np.flatnonzero(station.asset_switching_table[:, asset_idx]).tolist()
        for bus1_idx, bus2_idx in itertools.combinations(busbars_bridged, 2):
            # bus1_idx shall always be the smaller of the two
            if bus1_idx > bus2_idx:
                smaller_bus_idx, larger_bus_idx = bus2_idx, bus1_idx
            else:
                smaller_bus_idx, larger_bus_idx = bus1_idx, bus2_idx

            # Normalize the int_id pair the same way the coupler set was built
            int_id_a = station.busbars[smaller_bus_idx].int_id
            int_id_b = station.busbars[larger_bus_idx].int_id
            pair_key = (min(int_id_a, int_id_b), max(int_id_a, int_id_b))
            if pair_key not in coupled_pairs:
                multi_connected_without_coupler.append(
                    (
                        asset_idx,
                        smaller_bus_idx,
                        larger_bus_idx,
                    )
                )

    return multi_connected_without_coupler

fix_multi_connected_without_coupler #

fix_multi_connected_without_coupler(station)

Remove one connection for multi-connected assets without a coupler in between

Will always remove the connection to the busbar with the lower index.

PARAMETER DESCRIPTION
station

The station object to fix

TYPE: Station

RETURNS DESCRIPTION
Station

The new station object with the multi-connected assets fixed.

list[tuple[SwitchableAsset, Busbar, Busbar]]

A list of tuples containing the previously multi-connected assets and the busbars they were connected to. The first busbar in the tuple is the one that was disconnected.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def fix_multi_connected_without_coupler(
    station: Station,
) -> tuple[Station, list[tuple[SwitchableAsset, Busbar, Busbar]]]:
    """Drop one busbar connection for each multi-connected asset lacking a coupler

    The connection to the busbar with the lower index is always the one that
    gets removed.

    Parameters
    ----------
    station : Station
        The station object to fix

    Returns
    -------
    Station
        The new station object with the multi-connected assets fixed.
    list[tuple[SwitchableAsset, Busbar, Busbar]]
        A list of tuples containing the previously multi-connected assets and the busbars they were
        connected to. The first busbar in the tuple is the one that was disconnected.
    """
    offending = find_multi_connected_without_coupler(station)
    if not offending:
        # Nothing to repair, hand back the original station
        return station, []

    patched_table = np.copy(station.asset_switching_table)
    changes = []
    for asset_idx, disconnect_bus_idx, keep_bus_idx in offending:
        # Sever the connection to the lower-indexed busbar
        patched_table[disconnect_bus_idx, asset_idx] = 0
        changes.append(
            (
                station.assets[asset_idx],
                station.busbars[disconnect_bus_idx],
                station.busbars[keep_bus_idx],
            )
        )

    return station.model_copy(update={"asset_switching_table": patched_table}), changes

has_transmission_line_switching #

has_transmission_line_switching(station)

Check if the switching table contains transmission line switching

Transmission line switching is defined as disconnecting an asset from all busbars on purpose as a remedial action. Out-of-service assets are not considered irrespective of the switching table.

PARAMETER DESCRIPTION
station

The station object to check.

TYPE: Station

RETURNS DESCRIPTION
bool

True if the switching table contains transmission line switching, False otherwise.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def has_transmission_line_switching(station: Station) -> bool:
    """Check if the switching table contains transmission line switching

    Transmission line switching means an in-service asset is deliberately
    disconnected from every busbar as a remedial action. Out-of-service assets
    never count, irrespective of the switching table.

    Parameters
    ----------
    station : Station
        The station object to check.

    Returns
    -------
    bool
        True if the switching table contains transmission line switching, False otherwise.
    """
    # An asset is line-switched when it is in service yet has zero connections
    # in its switching-table column.
    connections_per_asset = station.asset_switching_table.sum(axis=0)
    in_service_flags = np.array([asset.in_service for asset in station.assets])
    return bool(np.any(in_service_flags & (connections_per_asset == 0)))

find_busbars_for_coupler #

find_busbars_for_coupler(busbars, coupler)

Find the from and to busbars for a coupler

Finds based on the int_id of the busbars.

PARAMETER DESCRIPTION
busbars

The list of busbars to search in

TYPE: list[Busbar]

coupler

The coupler to search for

TYPE: BusbarCoupler

RETURNS DESCRIPTION
Busbar

The from busbar

Busbar

The to busbar

RAISES DESCRIPTION
ValueError

If any of the busbars for the coupler are not found. This should never happen as the station validator should capture such a scenario.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def find_busbars_for_coupler(busbars: list[Busbar], coupler: BusbarCoupler) -> tuple[Busbar, Busbar]:
    """Find the from and to busbars for a coupler

    Matching is done on the int_id of the busbars.

    Parameters
    ----------
    busbars : list[Busbar]
        The list of busbars to search in
    coupler : BusbarCoupler
        The coupler to search for

    Returns
    -------
    Busbar
        The from busbar
    Busbar
        The to busbar

    Raises
    ------
    ValueError
        If any of the busbars for the coupler are not found. This should never happen as the
        station validator should capture such a scenario.
    """
    from_matches = (busbar for busbar in busbars if busbar.int_id == coupler.busbar_from_id)
    to_matches = (busbar for busbar in busbars if busbar.int_id == coupler.busbar_to_id)
    try:
        # next() yields the first busbar with a matching int_id
        return next(from_matches), next(to_matches)
    except StopIteration as exc:
        raise ValueError(f"Busbars for coupler {coupler.grid_model_id} not found") from exc

merge_couplers #

merge_couplers(original, new, busbar_mapping)

Merge an updated list of couplers into the original list

Due to preprocessing actions, the new list might contain a subset of the original couplers. Especially duplicate couplers were removed, so if the original list contains duplicates, both couplers need to be switched. It assumes that the busbar ids are mapped through the busbar mapping

If the new list contains duplicates, the couplers are counted as open if all duplicates are open and as closed otherwise

PARAMETER DESCRIPTION
original

The original list of couplers of the unprocessed station

TYPE: list[BusbarCoupler]

new

The new list of couplers of the processed station after applying the topology

TYPE: list[BusbarCoupler]

busbar_mapping

The mapping from the original busbar indices to the new busbar indices

TYPE: dict[int, int]

RETURNS DESCRIPTION
list[BusbarCoupler]

The updated list of couplers

list[BusbarCoupler]

The coupler diff in this station, i.e. which couplers have been switched. Stores the new state of the couplers, i.e. which state the coupler has been switched to

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def merge_couplers(
    original: list[BusbarCoupler],
    new: list[BusbarCoupler],
    busbar_mapping: dict[int, int],
) -> tuple[list[BusbarCoupler], list[BusbarCoupler]]:
    """Apply the coupler states of a processed station back onto the original list

    The processed (new) list may only contain a subset of the original couplers,
    e.g. because duplicate couplers were removed during preprocessing. Every
    original coupler whose mapped busbar pair has a target state is switched
    accordingly, so all duplicates of a pair get switched together. The busbar
    ids of the original couplers are translated through busbar_mapping before
    the lookup.

    If the new list itself contains duplicates for a pair, that pair counts as
    open only if all of those couplers are open, and as closed otherwise.

    Parameters
    ----------
    original : list[BusbarCoupler]
        The original list of couplers of the unprocessed station
    new : list[BusbarCoupler]
        The new list of couplers of the processed station after applying the topology
    busbar_mapping : dict[int, int]
        The mapping from the original busbar indices to the new busbar indices

    Returns
    -------
    list[BusbarCoupler]
        The updated list of couplers
    list[BusbarCoupler]
        The coupler diff in this station, i.e. which couplers have been switched. Stores the
        new state of the couplers, i.e. which state the coupler has been switched to
    """
    # Desired open-state per (from, to) busbar pair; duplicates are folded with
    # "and" so the pair is open only when every duplicate is open.
    target_state: dict[tuple[int, int], bool] = {}
    for coupler in new:
        key = (coupler.busbar_from_id, coupler.busbar_to_id)
        target_state[key] = target_state.get(key, True) and coupler.open

    merged = []
    switched = []
    for coupler in original:
        mapped_key = (
            busbar_mapping[coupler.busbar_from_id],
            busbar_mapping[coupler.busbar_to_id],
        )
        desired = target_state.get(mapped_key)
        if desired is None or desired == coupler.open:
            # No target state known for this pair, or already in the right state
            merged.append(coupler)
        else:
            updated = coupler.model_copy(update={"open": desired})
            merged.append(updated)
            switched.append(updated)

    return merged, switched

merge_stations #

merge_stations(
    original, new, missing_station_behavior="append"
)

Merge a list of changed stations into a list of original stations

All stations with a grid_model_id that is present in the new list will be updated using merge_station. The coupler+reassignment diffs will be concatenated. If a new station is not present in the original list, it will be appended to the end of the list if missing_station_behavior is "append".

PARAMETER DESCRIPTION
original

The original list of stations

TYPE: list[Station]

new

The list of changed stations

TYPE: list[Station]

missing_station_behavior

What to do if a station is not found in the original list, by default "append"

TYPE: Literal[append, 'raise'] DEFAULT: 'append'

RETURNS DESCRIPTION
list[Station]

The updated list of stations

list[tuple[str, BusbarCoupler]]

The coupler diff that has been switched, consisting of tuples with the station grid_model_id and the coupler that has been switched

list[tuple[str, int, int, bool]]

The reassignment diff that has been switched, consisting of tuples with the station grid_model_id, the asset index that was affected, which busbar index was affected and whether the asset was connected (True) or disconnected (False) to that bus

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def merge_stations(
    original: list[Station],
    new: list[Station],
    missing_station_behavior: Literal["append", "raise"] = "append",
) -> tuple[list[Station], list[tuple[str, BusbarCoupler]], list[tuple[str, int, int, bool]]]:
    """Merge a list of changed stations into a list of original stations

    All stations with a grid_model_id that is present in the new list will be updated using
    merge_station. The coupler+reassignment diffs will be concatenated. If a new station is not
    present in the original list, it will be appended to the end of the list if
    missing_station_behavior is "append".

    Parameters
    ----------
    original : list[Station]
        The original list of stations
    new : list[Station]
        The list of changed stations
    missing_station_behavior : Literal["append", "raise"], optional
        What to do if a station is not found in the original list, by default "append"

    Returns
    -------
    list[Station]
        The updated list of stations
    list[tuple[str, BusbarCoupler]]
        The coupler diff that has been switched, consisting of tuples with the station grid_model_id
        and the coupler that has been switched
    list[tuple[str, int, int, bool]]
        The reassignment diff that has been switched, consisting of tuples with the station grid_model_id,
        the asset index that was affected, which busbar index was affected and whether the asset was
        connected (True) or disconnected (False) to that bus

    Raises
    ------
    ValueError
        If missing_station_behavior is "raise" and a new station has no counterpart in the
        original list.
    """
    # Index the new stations by grid_model_id for O(1) lookup instead of a
    # nested scan. setdefault keeps the FIRST occurrence when ids repeat,
    # matching the previous first-match-wins behavior.
    new_by_id: dict[str, Station] = {}
    for new_station in new:
        new_by_id.setdefault(new_station.grid_model_id, new_station)

    matched_ids: set[str] = set()
    updated_station_list = []
    coupler_diff = []
    reassignment_diff = []
    for station in original:
        new_station = new_by_id.get(station.grid_model_id)
        if new_station is None:
            # No update for this station: keep it as-is
            updated_station_list.append(station)
            continue
        updated_station, coupler_diff_local, reassignment_diff_local = merge_station(station, new_station)
        updated_station_list.append(updated_station)
        matched_ids.add(new_station.grid_model_id)
        coupler_diff.extend((station.grid_model_id, coupler) for coupler in coupler_diff_local)
        reassignment_diff.extend(
            (station.grid_model_id, asset_idx, busbar_idx, bool(connected))
            for asset_idx, busbar_idx, connected in reassignment_diff_local
        )

    # Append (or reject) new stations that were not found in the original list
    for new_station in new:
        if new_station.grid_model_id not in matched_ids:
            if missing_station_behavior == "append":
                updated_station_list.append(new_station)
            else:
                raise ValueError(f"Station {new_station.grid_model_id} was not found in the original list")

    return updated_station_list, coupler_diff, reassignment_diff

merge_station #

merge_station(original, new)

Merge all the changes from the new station into the original station

This will overwrite all assets, couplers and busbars in the original station with the ones from the new station if the couplers are also found in the new station. Things that are not found in the new station will be left untouched. Assets that are in the new station but not in the original will also be left untouched.

If all in-service elements of original are also in new, then the returned substation will be electrically equivalent to the new substation. If this is not the case, the returned substation has all possible changes applied, but there are cases in which this is not electrically equivalent to the new substation.

You can use asset_topology_helpers.compare_stations to check which elements are different in the two stations and infer the differences.

PARAMETER DESCRIPTION
original

The original station that should be modified

TYPE: Station

new

The new station that contains the changes that should be merged into the original station

TYPE: Station

RETURNS DESCRIPTION
Station

The modified original station, with all the changes from the new station merged in

list[BusbarCoupler]

The coupler diff that has been switched

list[tuple[int, int, bool]]

The asset diff that has been switched. Each tuple contains the asset index that was affected, which busbar index was affected and whether the asset was connected (True) or disconnected (False) to that bus

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def merge_station(original: Station, new: Station) -> tuple[Station, list[BusbarCoupler], list[tuple[int, int, bool]]]:  # noqa: PLR0912, C901
    """Merge all the changes from the new station into the original station

    This will overwrite all assets, couplers and busbars in the original station with the ones from
    the new station if the couplers are also found in the new station. Things that are not found in
    the new station will be left untouched. Assets that are in the new station but not in the
    original will also be left untouched.

    If all in-service elements of original are also in new, then the returned substation will be
    electrically equivalent to the new substation. If this is not the case, the returned substation
    has all possible changes applied, but there are cases in which this is not electrically
    equivalent to the new substation.

    You can use asset_topology_helpers.compare_stations to check which elements are different
    in the two stations and infer the differences.

    Parameters
    ----------
    original : Station
        The original station that should be modified
    new : Station
        The new station that contains the changes that should be merged into the original station

    Returns
    -------
    Station
        The modified original station, with all the changes from the new station merged in
    list[BusbarCoupler]
        The coupler diff that has been switched
    list[tuple[int, int, bool]]
        The asset diff that has been switched. Each tuple contains the asset index that was
        affected, which busbar index was affected and whether the asset was connected (True) or
        disconnected (False) to that bus
    """
    # Fast path: identical stations need no merging and produce empty diffs
    if original == new:
        return original, [], []

    # Find a mapping from old busbars to new busbars. We just need an index mapping for copying the
    # switching table. Also store the missed busbars.
    # busbar_mapping: original list index -> new list index (matched busbars only)
    # busbar_int_id_mapping: original int_id -> new int_id (covers all original busbars)
    busbar_mapping = {}
    busbar_int_id_mapping = {}
    max_busbar_id = max(busbar.int_id for busbar in new.busbars)
    for i, busbar in enumerate(original.busbars):
        found = False
        for j, other in enumerate(new.busbars):
            # Busbars are matched by their grid_model_id
            if busbar.grid_model_id == other.grid_model_id:
                busbar_mapping[i] = j
                busbar_int_id_mapping[busbar.int_id] = other.int_id
                found = True
                break
        if not found:
            # Make sure to not accidentally map to an existing busbar
            busbar_int_id_mapping[busbar.int_id] = busbar.int_id + max_busbar_id + 1

    # Same for the switchable assets
    asset_mapping = {}
    for i, asset in enumerate(original.assets):
        for j, other in enumerate(new.assets):
            if asset.grid_model_id == other.grid_model_id:
                asset_mapping[i] = j
                break

    # Merge couplers
    new_couplers, coupler_diff = merge_couplers(original.couplers, new.couplers, busbar_mapping=busbar_int_id_mapping)

    # Loop through the switching table and copy the values over for which there is a mapping
    asset_diff = []
    new_asset_switching_table = original.asset_switching_table.copy()
    for busbar_idx in range(original.asset_switching_table.shape[0]):
        if busbar_idx in busbar_mapping:
            mapped_busbar_idx = busbar_mapping[busbar_idx]
            for asset_idx in range(original.asset_switching_table.shape[1]):
                if asset_idx in asset_mapping:
                    mapped_asset_idx = asset_mapping[asset_idx]

                    # Only cells that actually change are recorded and copied
                    if (
                        new_asset_switching_table[busbar_idx, asset_idx]
                        != new.asset_switching_table[mapped_busbar_idx, mapped_asset_idx]
                    ):
                        # Record the diff as (asset index, busbar index, new state)
                        # before overwriting the table cell
                        asset_diff.append(
                            (
                                asset_idx,
                                busbar_idx,
                                new.asset_switching_table[mapped_busbar_idx, mapped_asset_idx],
                            )
                        )

                        new_asset_switching_table[busbar_idx, asset_idx] = new.asset_switching_table[
                            mapped_busbar_idx, mapped_asset_idx
                        ]

    station = original.model_copy(update={"couplers": new_couplers, "asset_switching_table": new_asset_switching_table})
    return station, coupler_diff, asset_diff

compare_stations #

compare_stations(a, b)

Compare two stations and return the missing elements

It uses grid_model_ids to compare the assets, busbars and couplers. It does not consider different switching states or coupler states, but just checks if all the elements are also in the other station.

PARAMETER DESCRIPTION
a

The first station to compare.

TYPE: Station

b

The second station to compare.

TYPE: Station

RETURNS DESCRIPTION
list[BusbarCoupler]

The couplers that are in a but not in b.

list[BusbarCoupler]

The couplers that are in b but not in a.

list[Busbar]

The busbars that are in a but not in b.

list[Busbar]

The busbars that are in b but not in a.

list[SwitchableAsset]

The assets that are in a but not in b.

list[SwitchableAsset]

The assets that are in b but not in a.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def compare_stations(
    a: Station, b: Station
) -> tuple[
    list[BusbarCoupler],
    list[BusbarCoupler],
    list[Busbar],
    list[Busbar],
    list[SwitchableAsset],
    list[SwitchableAsset],
]:
    """Compare two stations and return the missing elements

    It uses grid_model_ids to compare the assets, busbars and couplers. It does not consider
    different switching states or coupler states, but just checks if all the elements are also in
    the other station.

    Parameters
    ----------
    a : Station
        The first station to compare.
    b : Station
        The second station to compare.

    Returns
    -------
    list[BusbarCoupler]
        The couplers that are in a but not in b.
    list[BusbarCoupler]
        The couplers that are in b but not in a.
    list[Busbar]
        The busbars that are in a but not in b.
    list[Busbar]
        The busbars that are in b but not in a.
    list[SwitchableAsset]
        The assets that are in a but not in b.
    list[SwitchableAsset]
        The assets that are in b but not in a.
    """

    def _only_in_first(first, second):
        # Elements of `first` whose grid_model_id has no counterpart in `second`.
        second_ids = {element.grid_model_id for element in second}
        return [element for element in first if element.grid_model_id not in second_ids]

    return (
        _only_in_first(a.couplers, b.couplers),
        _only_in_first(b.couplers, a.couplers),
        _only_in_first(a.busbars, b.busbars),
        _only_in_first(b.busbars, a.busbars),
        _only_in_first(a.assets, b.assets),
        _only_in_first(b.assets, a.assets),
    )

load_asset_topology_fs #

load_asset_topology_fs(filesystem, file_path)

Load an asset topology from a file system.

PARAMETER DESCRIPTION
filesystem

The file system to use to load the asset topology.

TYPE: AbstractFileSystem

file_path

The path to the file containing the asset topology in json format.

TYPE: Union[str, Path]

RETURNS DESCRIPTION
Topology

The loaded asset topology.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def load_asset_topology_fs(
    filesystem: AbstractFileSystem,
    file_path: Union[str, Path],
) -> Topology:
    """Read an asset topology stored as json on the given file system.

    Parameters
    ----------
    filesystem : AbstractFileSystem
        The file system to use to load the asset topology.
    file_path : Union[str, Path]
        The path to the file containing the asset topology in json format.

    Returns
    -------
    Topology
        The loaded asset topology.
    """
    # Deserialization and validation are delegated to the generic pydantic loader.
    return load_pydantic_model_fs(filesystem=filesystem, file_path=file_path, model_class=Topology)

load_asset_topology #

load_asset_topology(filename)

Load an asset topology from a file

PARAMETER DESCRIPTION
filename

The filename to load the asset topology from

TYPE: Union[str, Path]

RETURNS DESCRIPTION
Topology

The loaded asset topology

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def load_asset_topology(filename: Union[str, Path]) -> Topology:
    """Load an asset topology from a file

    Parameters
    ----------
    filename : Union[str, Path]
        The filename to load the asset topology from

    Returns
    -------
    Topology
        The loaded asset topology
    """
    # Convenience wrapper: same as load_asset_topology_fs on the local disk.
    local_fs = LocalFileSystem()
    return load_asset_topology_fs(filesystem=local_fs, file_path=filename)

save_asset_topology_fs #

save_asset_topology_fs(
    filesystem, filename, asset_topology
)

Save an asset topology to a file system

PARAMETER DESCRIPTION
filesystem

The file system to save the asset topology to

TYPE: AbstractFileSystem

filename

The filename to save the asset topology to

TYPE: Union[str, Path]

asset_topology

The asset topology to save

TYPE: Topology

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def save_asset_topology_fs(filesystem: AbstractFileSystem, filename: Union[str, Path], asset_topology: Topology) -> None:
    """Save an asset topology to a file system

    Parameters
    ----------
    filesystem : AbstractFileSystem
        The file system to save the asset topology to
    filename : Union[str, Path]
        The filename to save the asset topology to
    asset_topology : Topology
        The asset topology to save
    """
    # Serialize first, then write in one go through the file-system abstraction.
    serialized = asset_topology.model_dump_json(indent=2)
    with filesystem.open(str(filename), "w", encoding="utf-8") as out_file:
        out_file.write(serialized)

save_asset_topology #

save_asset_topology(filename, asset_topology)

Save an asset topology to a file

PARAMETER DESCRIPTION
filename

The filename to save the asset topology to

TYPE: Union[str, Path]

asset_topology

The asset topology to save

TYPE: Topology

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def save_asset_topology(filename: Union[str, Path], asset_topology: Topology) -> None:
    """Save an asset topology to a file

    Parameters
    ----------
    filename : Union[str, Path]
        The filename to save the asset topology to
    asset_topology : Topology
        The asset topology to save
    """
    # Convenience wrapper: same as save_asset_topology_fs on the local disk.
    save_asset_topology_fs(
        filesystem=LocalFileSystem(),
        filename=filename,
        asset_topology=asset_topology,
    )

get_connected_assets #

get_connected_assets(station, busbar_index)

Get the assets connected to a specific busbar in a station.

PARAMETER DESCRIPTION
station

The station object containing the switching table and assets.

TYPE: Station

busbar_index

The index of the busbar in the switching table.

TYPE: int

RETURNS DESCRIPTION
list of SwitchableAsset

A list of assets connected to the specified busbar.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def get_connected_assets(station: Station, busbar_index: int) -> list[SwitchableAsset]:
    """Get the assets connected to a specific busbar in a station.

    Parameters
    ----------
    station : Station
        The station object containing the switching table and assets.
    busbar_index : int
        The index of the busbar in the switching table.

    Returns
    -------
    list of SwitchableAsset
        A list of assets connected to the specified busbar (out-of-service assets are skipped).
    """
    # The busbar's row in the switching table marks which assets are switched onto it.
    switching_row = station.asset_switching_table[busbar_index]
    candidates = (station.assets[int(idx)] for idx in np.flatnonzero(switching_row))
    return [asset for asset in candidates if asset.in_service]

accumulate_diffs #

accumulate_diffs(realized_stations)

Accumulate the diffs of the realized stations into the format of realized topology

PARAMETER DESCRIPTION
realized_stations

The realized stations to accumulate

TYPE: list[RealizedStation]

RETURNS DESCRIPTION
list[tuple[str, BusbarCoupler]]

The accumulated coupler diff

list[tuple[str, int, int, bool]]

The accumulated reassignment diff

list[tuple[str, int]]

The accumulated disconnection diff

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def accumulate_diffs(
    realized_stations: list[RealizedStation],
) -> tuple[
    list[tuple[str, BusbarCoupler]],
    list[tuple[str, int, int, bool]],
    list[tuple[str, int]],
]:
    """Accumulate the diffs of the realized stations into the format of realized topology

    Parameters
    ----------
    realized_stations : list[RealizedStation]
        The realized stations to accumulate

    Returns
    -------
    list[tuple[str, BusbarCoupler]]
        The accumulated coupler diff
    list[tuple[str, int, int, bool]]
        The accumulated reassignment diff
    list[tuple[str, int]]
        The accumulated disconnection diff
    """
    coupler_diff = []
    reassignment_diff = []
    disconnection_diff = []
    for realized in realized_stations:
        # Prefix every per-station diff entry with the station's grid model id.
        station_id = realized.station.grid_model_id
        for coupler in realized.coupler_diff:
            coupler_diff.append((station_id, coupler))
        for asset_idx, bus_idx, connected in realized.reassignment_diff:
            reassignment_diff.append((station_id, asset_idx, bus_idx, connected))
        for asset_idx in realized.disconnection_diff:
            disconnection_diff.append((station_id, asset_idx))

    return coupler_diff, reassignment_diff, disconnection_diff

station_diff #

station_diff(start_station, target_station)

Compute the diff between two stations

The same station must be described by both inputs, i.e. the assets, busbars and couplers (except for their open state) must be the same.

PARAMETER DESCRIPTION
start_station

The starting station from which to start the diff

TYPE: Station

target_station

The ending station to which the diff shall lead when applied to the starting station

TYPE: Station

RETURNS DESCRIPTION
RealizedStation

The realized station containing the target station and the coupler, reassignment and disconnection diffs

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def station_diff(
    start_station: Station,
    target_station: Station,
) -> RealizedStation:
    """Compute the diff between two stations

    The same station must be described by both inputs, i.e. the assets, busbars and couplers (except for their open state)
    must be the same.

    Parameters
    ----------
    start_station : Station
        The starting station from which to start the diff
    target_station : Station
        The ending station to which the diff shall lead when applied to the starting station

    Returns
    -------
    RealizedStation
        The realized station containing the target station and the coupler, reassignment and disconnection diffs
    """
    # Both stations must describe the same physical elements, in the same order.
    for attribute in ("assets", "busbars", "couplers"):
        start_ids = [element.grid_model_id for element in getattr(start_station, attribute)]
        target_ids = [element.grid_model_id for element in getattr(target_station, attribute)]
        assert start_ids == target_ids, f"{attribute.capitalize()} do not match"

    reassignment_diff = []
    disconnection_diff = []
    for asset_idx in range(len(start_station.assets)):
        start_column = start_station.asset_switching_table[:, asset_idx]
        target_column = target_station.asset_switching_table[:, asset_idx]
        start_connected = bool(np.any(start_column))
        target_connected = bool(np.any(target_column))

        if target_connected and not start_connected:
            raise NotImplementedError(
                "Reconnections are not supported yet, there is no diff for that"
                + f" (asset {asset_idx} in station {start_station.grid_model_id})"
            )

        if not target_connected:
            if start_connected:
                # Connected at the start but not in the target: record a disconnection.
                disconnection_diff.append(asset_idx)
            # A fully disconnected asset produces no reassignment entries.
            continue

        # Every busbar where start and target switching states disagree becomes a reassignment.
        for busbar_idx in np.flatnonzero(start_column != target_column):
            reassignment_diff.append((asset_idx, busbar_idx, target_column[busbar_idx]))

    coupler_diff = [
        target_coupler
        for start_coupler, target_coupler in zip(start_station.couplers, target_station.couplers, strict=True)
        if start_coupler.open != target_coupler.open
    ]

    return RealizedStation(
        station=target_station,
        coupler_diff=coupler_diff,
        reassignment_diff=reassignment_diff,
        disconnection_diff=disconnection_diff,
    )

topology_diff #

topology_diff(start_topo, target_topo)

Compute the difference between two topologies

PARAMETER DESCRIPTION
start_topo

The starting topology

TYPE: Topology

target_topo

The targeted topology.

TYPE: Topology

RETURNS DESCRIPTION
RealizedTopology

The realized topology containing the target topology and the coupler, reassignment and disconnection diffs that lead from the starting topology to the target topology.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def topology_diff(
    start_topo: Topology,
    target_topo: Topology,
) -> RealizedTopology:
    """Compute the difference between two topologies

    Parameters
    ----------
    start_topo : Topology
        The starting topology
    target_topo : Topology
        The targeted topology.

    Returns
    -------
    RealizedTopology
        The realized topology containing the target topology and the coupler, reassignment and disconnection diffs that lead
        from the starting topology to the target topology.
    """
    # Stations are matched pairwise by position; strict zip rejects length mismatches.
    station_pairs = zip(start_topo.stations, target_topo.stations, strict=True)
    realized_stations = [station_diff(start, target) for start, target in station_pairs]
    couplers, reassignments, disconnections = accumulate_diffs(realized_stations)
    return RealizedTopology(
        topology=target_topo,
        coupler_diff=couplers,
        reassignment_diff=reassignments,
        disconnection_diff=disconnections,
    )

order_station_assets #

order_station_assets(station, asset_ids)

Orders the assets in a station according to a list of asset ids.

If an asset is not found in the station, it will be added to the not_found list. If an asset is not present in the asset_ids list, it will be dropped from the station.

PARAMETER DESCRIPTION
station

The station to order.

TYPE: Station

asset_ids

A list of asset ids. The assets will be ordered according to the grid_model_id.

TYPE: list[str]

RETURNS DESCRIPTION
Station

The ordered station

list[str]

A list of asset ids that were not found in the station

list[str]

A list of asset ids that were ignored, i.e. not present in the asset_ids list

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def order_station_assets(station: Station, asset_ids: list[str]) -> tuple[Station, list[str], list[str]]:
    """Orders the assets in a station according to a list of asset ids.

    If an asset is not found in the station, it will be added to the not_found list.
    If an asset is not present in the asset_ids list, it will be dropped from the station.

    Parameters
    ----------
    station : Station
        The station to order.
    asset_ids : list[str]
        A list of asset ids. The assets will be ordered according to the grid_model_id.

    Returns
    -------
    Station
        The ordered station
    list[str]
        A list of asset ids that were not found in the station
    list[str]
        A list of asset ids that were ignored, i.e. not present in the asset_ids list
    """
    # Index the assets once so each requested id resolves in O(1) instead of re-scanning the
    # asset list per id. setdefault keeps the first occurrence, matching the previous
    # first-match linear search if grid_model_ids were ever duplicated.
    position_by_id: dict[str, int] = {}
    for pos, asset in enumerate(station.assets):
        position_by_id.setdefault(asset.grid_model_id, pos)

    new_assets = []
    not_found = []
    old_positions = []
    for asset_id in asset_ids:
        pos = position_by_id.get(asset_id)
        if pos is None:
            not_found.append(asset_id)
        else:
            new_assets.append(station.assets[pos])
            old_positions.append(pos)

    # Set membership avoids the O(n^2) `index not in list` scan over all assets.
    kept_positions = set(old_positions)
    ignored = [asset.grid_model_id for index, asset in enumerate(station.assets) if index not in kept_positions]

    # Reorder the table columns to mirror the new asset order.
    asset_switching_table = station.asset_switching_table[:, old_positions]
    asset_connectivity = station.asset_connectivity[:, old_positions] if station.asset_connectivity is not None else None
    station = station.model_copy(
        update={
            "assets": new_assets,
            "asset_switching_table": asset_switching_table,
            "asset_connectivity": asset_connectivity,
        }
    )
    Station.model_validate(station)
    return station, not_found, ignored

order_topology #

order_topology(topology, station_ids)

Orders the stations in a topology according to a list of ids.

If a station is not found in the topology, it will be added to the not_found list. If a station is not present in the station_ids list, it will be dropped.

PARAMETER DESCRIPTION
topology

The topology to order.

TYPE: Topology

station_ids

A list of station ids. The stations will be ordered according to the grid_model_id.

TYPE: list[str]

RETURNS DESCRIPTION
Topology

The ordered topology

list[str]

A list of station ids that were not found in the topology

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def order_topology(topology: Topology, station_ids: list[str]) -> tuple[Topology, list[str]]:
    """Orders the stations in a topology according to a list of ids.

    If a station is not found in the topology, it will be added to the not_found list.
    If a station is not present in the station_ids list, it will be dropped.

    Parameters
    ----------
    topology : Topology
        The topology to order.
    station_ids : list[str]
        A list of station ids. The stations will be ordered according to the grid_model_id.

    Returns
    -------
    Topology
        The ordered topology
    list[str]
        A list of station ids that were not found in the topology
    """
    ordered_stations = []
    missing_ids = []
    for wanted_id in station_ids:
        # First station whose grid_model_id matches, or None if absent.
        match = next((station for station in topology.stations if station.grid_model_id == wanted_id), None)
        if match is None:
            missing_ids.append(wanted_id)
        else:
            ordered_stations.append(match)

    reordered = topology.model_copy(update={"stations": ordered_stations})
    return reordered, missing_ids

fuse_coupler #

fuse_coupler(
    station, coupler_grid_model_id, copy_info_from=True
)

Fuses a coupler by merging the adjacent busbars into one busbar.

PARAMETER DESCRIPTION
station

The station with the coupler to fuse. Assumes that the coupler is in the station. The open/closed state of the coupler is disregarded, i.e. the coupler will be fused regardless of its state.

TYPE: Station

coupler_grid_model_id

The grid_model_id of the coupler to fuse

TYPE: str

copy_info_from

Whether the new busbar retains the information (grid_model_id, etc) of the busbar on the from side of the coupler, by default True. If False, the busbar on the to side of the coupler is used.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
Station

The station with the coupler fused. The coupler is removed and the busbars are merged.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def fuse_coupler(
    station: Station,
    coupler_grid_model_id: str,
    copy_info_from: bool = True,
) -> Station:
    """Fuses a coupler by merging the adjacent busbars into one busbar.

    Parameters
    ----------
    station : Station
        The station with the coupler to fuse. Assumes that the coupler is in the station. The open/closed state of the
        coupler is disregarded, i.e. the coupler will be fused regardless of its state.
    coupler_grid_model_id : str
        The grid_model_id of the coupler to fuse
    copy_info_from : bool, optional
        Whether the new busbar retains the information (grid_model_id, etc) of the busbar on the from side of the coupler,
        by default True. If False, the busbar on the to side of the coupler is used.

    Returns
    -------
    Station
        The station with the coupler fused. The coupler is removed and the busbars are merged.

    Raises
    ------
    ValueError
        If the coupler is not found in the station, or if parallel couplers connect the same pair of busbars.
    """
    coupler = next((c for c in station.couplers if c.grid_model_id == coupler_grid_model_id), None)
    if coupler is None:
        raise ValueError(f"Coupler {coupler_grid_model_id} not found in station {station.grid_model_id}")

    busbar_from_index = next((index for index, b in enumerate(station.busbars) if b.int_id == coupler.busbar_from_id), None)
    busbar_to_index = next((index for index, b in enumerate(station.busbars) if b.int_id == coupler.busbar_to_id), None)

    assert busbar_from_index is not None, f"Busbar {coupler.busbar_from_id} not found in station {station.grid_model_id}"
    assert busbar_to_index is not None, f"Busbar {coupler.busbar_to_id} not found in station {station.grid_model_id}"

    # Fusing is ambiguous when several couplers connect the same pair of busbars, so refuse in that case.
    parallel_couplers = [
        c
        for c in station.couplers
        if (c.busbar_from_id == coupler.busbar_from_id and c.busbar_to_id == coupler.busbar_to_id)
        or (c.busbar_to_id == coupler.busbar_from_id and c.busbar_from_id == coupler.busbar_to_id)
    ]
    if len(parallel_couplers) > 1:
        raise ValueError(
            f"Coupler {coupler_grid_model_id} has parallel couplers in station {station.grid_model_id}, "
            "cannot fuse parallel couplers with the same busbars"
        )

    # The merged busbar carries every asset connection of either side.
    switching_row = station.asset_switching_table[busbar_from_index] | station.asset_switching_table[busbar_to_index]

    # Remove either the from or the to busbar
    busbar_index_to_remove = busbar_to_index if copy_info_from else busbar_from_index
    busbar_index_to_keep = busbar_from_index if copy_info_from else busbar_to_index

    new_switching_table = np.copy(station.asset_switching_table)
    new_switching_table[busbar_index_to_keep] = switching_row
    new_switching_table = np.delete(new_switching_table, busbar_index_to_remove, axis=0)

    if station.asset_connectivity is not None:
        # BUGFIX: the merged connectivity row was previously computed unconditionally, which
        # raised a TypeError (NoneType is not subscriptable) whenever asset_connectivity was None.
        connectivity_row = station.asset_connectivity[busbar_from_index] | station.asset_connectivity[busbar_to_index]
        new_connectivity_table = np.copy(station.asset_connectivity)
        new_connectivity_table[busbar_index_to_keep] = connectivity_row
        new_connectivity_table = np.delete(new_connectivity_table, busbar_index_to_remove, axis=0)
    else:
        new_connectivity_table = None

    busbar_to_remove = station.busbars[busbar_index_to_remove]
    busbar_to_keep = station.busbars[busbar_index_to_keep]

    def _replace_sr_keys(asset: SwitchableAsset) -> SwitchableAsset:
        """Update the sr switch asset if it is connected to the removed busbar."""
        if asset.asset_bay is None:
            return asset
        if (
            busbar_to_remove.grid_model_id in asset.asset_bay.sr_switch_grid_model_id.keys()
            and busbar_to_keep.grid_model_id in asset.asset_bay.sr_switch_grid_model_id.keys()
        ):
            # If both busbars are present, we need to remove the one that is not kept
            new_sr_switch_grid_model_id = {
                key: foreign_id
                for (key, foreign_id) in asset.asset_bay.sr_switch_grid_model_id.items()
                if key != busbar_to_remove.grid_model_id
            }
        else:
            # If the target busbar is not present, we change the key
            new_sr_switch_grid_model_id = {
                (busbar_to_keep.grid_model_id if key == busbar_to_remove.grid_model_id else key): foreign_id
                for (key, foreign_id) in asset.asset_bay.sr_switch_grid_model_id.items()
            }

        return asset.model_copy(
            update={"asset_bay": asset.asset_bay.model_copy(update={"sr_switch_grid_model_id": new_sr_switch_grid_model_id})}
        )

    def _replace_int_id(coupler: BusbarCoupler) -> BusbarCoupler:
        """Update coupler int-ids that are pointing to the removed busbar."""
        if coupler.busbar_from_id == busbar_to_remove.int_id:
            return coupler.model_copy(update={"busbar_from_id": busbar_to_keep.int_id})
        if coupler.busbar_to_id == busbar_to_remove.int_id:
            return coupler.model_copy(update={"busbar_to_id": busbar_to_keep.int_id})
        return coupler

    new_busbars = [b for i, b in enumerate(station.busbars) if i != busbar_index_to_remove]
    new_couplers = [_replace_int_id(c) for c in station.couplers if c.grid_model_id != coupler_grid_model_id]
    new_assets = [_replace_sr_keys(a) for a in station.assets]

    station = station.model_copy(
        update={
            "busbars": new_busbars,
            "asset_switching_table": new_switching_table,
            "asset_connectivity": new_connectivity_table,
            "assets": new_assets,
            "couplers": new_couplers,
        }
    )
    Station.model_validate(station)
    return station

fuse_all_couplers_with_type #

fuse_all_couplers_with_type(
    station, coupler_type, copy_info_from=True
)

Fuses all couplers of a specific type in a station.

This will also filter all duplicate couplers, as there might be an edge case in which a triangle of busbars is not properly merged otherwise.

PARAMETER DESCRIPTION
station

The station with the couplers to fuse

TYPE: Station

coupler_type

The type of coupler to fuse, will match the coupler.type attribute. If coupler.type is None, it will never match.

TYPE: str

copy_info_from

Whether the new busbar retains the information (grid_model_id, etc) of the busbar on the from side of the coupler, by default True. If False, the busbar on the to side of the coupler is used.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
Station

The station with the couplers fused that matched the type. The couplers are removed and the busbars are merged.

list[BusbarCoupler]

The couplers that were fused or removed due to being parallel and are no longer present in the station.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def fuse_all_couplers_with_type(
    station: Station,
    coupler_type: str,
    copy_info_from: bool = True,
) -> tuple[Station, list[BusbarCoupler]]:
    """Fuses all couplers of a specific type in a station.

    This will also filter all duplicate couplers, as there might be an edge case in which a triangle of busbars is not
    properly merged otherwise.

    Parameters
    ----------
    station : Station
        The station with the couplers to fuse
    coupler_type : str
        The type of coupler to fuse, will match the coupler.type attribute. If coupler.type is None, it will never match.
    copy_info_from : bool, optional
        Whether the new busbar retains the information (grid_model_id, etc) of the busbar on the from side of the coupler,
        by default True. If False, the busbar on the to side of the coupler is used.

    Returns
    -------
    Station
        The station with the couplers fused that matched the type. The couplers are removed and the busbars are merged.
    list[BusbarCoupler]
        The couplers that were fused or removed due to being parallel and are no longer present in the station.
    """
    removed_couplers = []
    while True:
        matching = (c for c in station.couplers if c.type is not None and c.type == coupler_type)
        next_coupler = next(matching, None)
        if next_coupler is None:
            return station, removed_couplers

        station = fuse_coupler(station, next_coupler.grid_model_id, copy_info_from=copy_info_from)
        removed_couplers.append(next_coupler)

        # We want to retain the coupler type that we filter for and remove all other couplers - this way we can
        # make sure that all parallel couplers are removed. Otherwise it would depend on the order of the couplers
        # whether there would be a residual coupler left between the potentially fused busbars.
        station, duplicates = filter_duplicate_couplers(station, retain_type_hierarchy=[coupler_type])
        removed_couplers.extend(duplicates)

find_station_by_id #

find_station_by_id(stations, station_id)

Find a station by its grid_model_id in a list of stations.

PARAMETER DESCRIPTION
stations

The list of stations to search in.

TYPE: list[Station]

station_id

The grid_model_id of the station to find.

TYPE: str

RETURNS DESCRIPTION
Station

The station with the given grid_model_id.

RAISES DESCRIPTION
ValueError

If the station is not found in the list.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_helpers.py
def find_station_by_id(stations: list[Station], station_id: str) -> Station:
    """Find a station by its grid_model_id in a list of stations.

    Parameters
    ----------
    stations : list[Station]
        The list of stations to search in.
    station_id : str
        The grid_model_id of the station to find.

    Returns
    -------
    Station
        The station with the given grid_model_id.

    Raises
    ------
    ValueError
        If the station is not found in the list.
    """
    matches = (station for station in stations if station.grid_model_id == station_id)
    found = next(matches, None)
    if found is None:
        raise ValueError(f"Station {station_id} not found in the list")
    return found

Asset Topology Loadflow#

toop_engine_interfaces.asset_topology_loadflow #

Provides an interface for storing loadflow results in the asset topology.

It inherits from the asset_topology interface but adds additional fields for storing loadflow results.

SwitchableAssetWithLF #

Bases: SwitchableAsset

A switchable asset with additional fields for storing loadflow results.

All fields are optional because json does not support nan/inf values. If a value is nan/inf, it will be converted to None by the field_validators.

p instance-attribute #

p

The active power flow over the asset in MW.

q instance-attribute #

q

The reactive power flow over the asset in MVar.

i instance-attribute #

i

The current flow over the asset in kA.

i_max instance-attribute #

i_max

The maximum current allowed over the asset, if present. For assets without a current limit, this is None

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the asset. Corresponds to the asset's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the asset. These refer loosely to the types in the pandapower/powsybl grid models. If set, this can be used to disambiguate branches from injections

name class-attribute instance-attribute #

name = None

The name of the asset, might be useful for finding the asset later on

in_service class-attribute instance-attribute #

in_service = True

If the element is in service. False means the switching entry for this element will be ignored. This shall not be used for elements intentionally disconnected, instead set all zeros in the switching table.

branch_end class-attribute instance-attribute #

branch_end = None

If the asset was a branch, this can store which end of the branch was connected to the station in the original grid model. This can take the values "from", "to", "hv", "mv", "lv", where from/to works for lines and hv/mv/lv works for transformers. This should only be set if this is needed for the postprocessing, in theory a branch should be identifiable by the branch id and the station id. Injection-type assets like generators and loads should not have this set.

asset_bay class-attribute instance-attribute #

asset_bay = None

The asset bay (Schaltfeld) of the asset. The connection path is used to determine the physical connection of the asset to the busbar. None of these switches will be found in the network model, they are only used for the asset topology.

convert_nan classmethod #

convert_nan(value)

Replace nan/inf values with None

PARAMETER DESCRIPTION
value

The value to check for nan/inf

TYPE: float

RETURNS DESCRIPTION
Optional[float]

The value, or None if it was nan/inf

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_loadflow.py
@field_validator("p", "q", "i", "i_max")
@classmethod
def convert_nan(cls, value: float) -> Optional[float]:
    """Replace nan/inf values with None

    Parameters
    ----------
    value : float
        The value to check for nan/inf

    Returns
    -------
    Optional[float]
        The value, or None if it was nan/inf
    """
    # math.isfinite is False for both nan and +/-inf, so this mirrors the
    # isnan/isinf pair in a single check; None is mapped to None as before.
    if value is not None and math.isfinite(value):
        return value
    return None

is_branch #

is_branch()

Return True if the asset is a branch.

Only works if the type is set. If type is not set this will return None.

RETURNS DESCRIPTION
bool

True if the asset is a branch, False if it is an injection.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
def is_branch(self) -> Optional[bool]:
    """Tell whether this asset is a branch-type asset.

    Requires the type field to be set; if it is missing, no decision
    can be made and None is returned.

    Returns
    -------
    bool
        True if the asset is a branch, False if it is an injection.
    """
    asset_type = self.type
    return None if asset_type is None else asset_type in get_args(AssetBranchType)

BusbarWithLF #

Bases: Busbar

A busbar with additional fields for storing loadflow results.

va instance-attribute #

va

The voltage angle at the busbar in degrees

vm instance-attribute #

vm

The voltage magnitude at the busbar in kV

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the busbar. Corresponds to the busbar's id in the grid model.

type class-attribute instance-attribute #

type = None

The type of the busbar, might be useful for finding the busbar later on

name class-attribute instance-attribute #

name = None

The name of the busbar, might be useful for finding the busbar later on

int_id instance-attribute #

int_id

Is used to reference busbars in the couplers. Needs to be unique per station

in_service class-attribute instance-attribute #

in_service = True

Whether the busbar is in service. If False, it will be ignored in the switching table

bus_branch_bus_id class-attribute instance-attribute #

bus_branch_bus_id = None

The bus_branch_bus_id refers to the bus-branch model bus id. There might be a difference between the busbar grid_model_id (a physical busbar) and the bus_branch_bus_id from the bus-branch model. Use this bus_branch_bus_id to store the bus-branch model bus id. Note: the Station grid_model_id is also a bus-branch bus_branch_bus_id; it is the most splittable bus_branch_bus_id. Other bus_branch_bus_ids are part of the physical station, but are separated by a coupler or branch.

convert_nan classmethod #

convert_nan(value)

Replace nan/inf values with None

PARAMETER DESCRIPTION
value

The value to check for nan/inf

TYPE: float

RETURNS DESCRIPTION
Optional[float]

The value, or None if it was nan/inf

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_loadflow.py
@field_validator("va", "vm")
@classmethod
def convert_nan(cls, value: float) -> Optional[float]:
    """Map missing or non-finite voltage results to None.

    Parameters
    ----------
    value : float
        The candidate value, possibly nan or +/-inf

    Returns
    -------
    Optional[float]
        The value unchanged, or None when it is missing, nan or inf
    """
    # Keeps the serialized model JSON-compatible (json has no nan/inf).
    not_usable = value is None or math.isnan(value) or math.isinf(value)
    return None if not_usable else value

StationWithLF #

Bases: Station

A station with additional fields for storing loadflow results.

busbars instance-attribute #

busbars

The busbars, overloaded from station replacing the Busbar class with BusbarWithLF

assets instance-attribute #

assets

The assets, overloaded from station replacing the SwitchableAsset class with SwitchableAssetWithLF

grid_model_id instance-attribute #

grid_model_id

The unique identifier of the station. Corresponds to the station's id in the grid model. Expects the bus-branch model bus_id, which is the most splittable bus_id.

name class-attribute instance-attribute #

name = None

The name of the station.

type class-attribute instance-attribute #

type = None

The type of the station.

region class-attribute instance-attribute #

region = None

The region of the station.

voltage_level class-attribute instance-attribute #

voltage_level = None

The voltage level of the station.

couplers instance-attribute #

couplers

The list of couplers at the station.

asset_switching_table instance-attribute #

asset_switching_table

Holds the switching of each asset to each busbar, shape (n_bus, n_asset).

An entry is true if the asset is connected to the busbar. Note: An asset can be connected to multiple busbars, in which case a closed coupler is assumed to be present between these busbars Note: An asset can be connected to none of the busbars. In this case, the asset is intentionally disconnected as part of a transmission line switching action. In practice, this usually involves a separate switch from the asset-to-busbar couplers, as each asset usually has a switch that completely disconnects it from the station. These switches are not modelled here, a postprocessing routine needs to do the translation to this physical layout. Do not use in_service for intentional disconnections.

asset_connectivity class-attribute instance-attribute #

asset_connectivity = None

Holds all possible layouts of the asset_switching_table, shape (n_bus, n_asset).

An entry is true if it is possible to connect an asset to the busbar. If None, it is assumed that all branches can be connected to all busbars.

model_log class-attribute instance-attribute #

model_log = None

Holds log messages from the model creation process.

This can be used to store information about the model creation process, e.g. warnings or errors. A potential use case is to inform the user about data quality issues e.g. missing the Asset Bay switches.

check_int_id_unique classmethod #

check_int_id_unique(v)

Check if int_id is unique for all busbars.

PARAMETER DESCRIPTION
v

The list of busbars to check.

TYPE: list[Busbar]

RETURNS DESCRIPTION
list[Busbar]

The list of busbars.

RAISES DESCRIPTION
ValueError

If int_id is not unique for all busbars.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@field_validator("busbars")
@classmethod
def check_int_id_unique(cls, v: list[Busbar]) -> list[Busbar]:
    """Validate that no two busbars of a station share an int_id.

    Parameters
    ----------
    v : list[Busbar]
        The list of busbars to check.

    Returns
    -------
    list[Busbar]
        The unchanged list of busbars.

    Raises
    ------
    ValueError
        If int_id is not unique for all busbars.
    """
    seen = set()
    for busbar in v:
        if busbar.int_id in seen:
            raise ValueError("int_id must be unique for busbars")
        seen.add(busbar.int_id)
    return v

check_coupler_busbars_different classmethod #

check_coupler_busbars_different(v)

Check if busbar_from_id and busbar_to_id are different for all couplers.

PARAMETER DESCRIPTION
v

The list of couplers to check.

TYPE: list[BusbarCoupler]

RETURNS DESCRIPTION
list[BusbarCoupler]

The list of couplers.

RAISES DESCRIPTION
ValueError

If busbar_from_id and busbar_to_id are the same for any coupler.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@field_validator("couplers")
@classmethod
def check_coupler_busbars_different(cls, v: list[BusbarCoupler]) -> list[BusbarCoupler]:
    """Validate that every coupler connects two distinct busbars.

    Parameters
    ----------
    v : list[BusbarCoupler]
        The list of couplers to check.

    Returns
    -------
    list[BusbarCoupler]
        The unchanged list of couplers.

    Raises
    ------
    ValueError
        If busbar_from_id and busbar_to_id are the same for any coupler.
    """
    self_loops = [coupler for coupler in v if coupler.busbar_from_id == coupler.busbar_to_id]
    if self_loops:
        # Report the first offending coupler, as a loop would have done.
        raise ValueError(f"busbar_from_id and busbar_to_id must be different for coupler {self_loops[0].grid_model_id}")
    return v

check_busbar_exists #

check_busbar_exists()

Check if all busbars in couplers exist in the busbars list.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_busbar_exists(self: "Station") -> "Station":
    """Check if all busbars referenced by couplers exist in the busbars list.

    Returns
    -------
    Station
        The station itself.

    Raises
    ------
    ValueError
        If a coupler references a busbar int_id that is not present in busbars.
    """
    # Set membership keeps validation O(busbars + couplers) instead of
    # O(busbars * couplers) with a list scan per coupler.
    busbar_ids = {busbar.int_id for busbar in self.busbars}
    for coupler in self.couplers:
        if coupler.busbar_from_id not in busbar_ids:
            raise ValueError(
                f"busbar_from_id {coupler.busbar_from_id} in coupler {coupler.grid_model_id} does not exist in busbars."
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
        if coupler.busbar_to_id not in busbar_ids:
            raise ValueError(
                f"busbar_to_id {coupler.busbar_to_id} in coupler {coupler.grid_model_id} does not exist in busbars"
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
    return self

check_coupler_references #

check_coupler_references()

Check if all closed couplers reference in-service busbars.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_coupler_references(self: "Station") -> "Station":
    """Check if all closed couplers reference in-service busbars."""
    in_service_by_id = {busbar.int_id: busbar.in_service for busbar in self.busbars}
    for coupler in self.couplers:
        # Only an in-service, closed coupler actually ties its busbars together.
        effectively_closed = coupler.in_service and not coupler.open
        if effectively_closed and in_service_by_id[coupler.busbar_from_id] != in_service_by_id[coupler.busbar_to_id]:
            raise ValueError(
                f"Closed coupler {coupler.grid_model_id} connects out-of-service busbar with in-service busbar."
                f" Station_id: {self.grid_model_id}, Name: {self.name}"
            )
    return self

check_asset_switching_table_shape #

check_asset_switching_table_shape()

Check if the switching table shape matches the busbars and assets.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_switching_table_shape(self: "Station") -> "Station":
    """Check if the switching table shape matches the busbars and assets."""
    # Both tables must be laid out as (n_bus, n_asset).
    expected_shape = (len(self.busbars), len(self.assets))

    if self.asset_switching_table.shape != expected_shape:
        raise ValueError(
            f"asset_switching_table shape {self.asset_switching_table.shape} does not match busbars "
            f"{expected_shape[0]} and assets {expected_shape[1]}"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    # asset_connectivity is optional; only validate it when present.
    if self.asset_connectivity is not None and self.asset_connectivity.shape != expected_shape:
        raise ValueError(
            f"asset_connectivity shape {self.asset_connectivity.shape} does not match busbars "
            f"{expected_shape[0]} and assets {expected_shape[1]}"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    return self

check_asset_switching_table_current_vs_physical #

check_asset_switching_table_current_vs_physical()

Check all current assignments are physically allowed.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_switching_table_current_vs_physical(self: "Station") -> "Station":
    """Check all current assignments are physically allowed."""
    if self.asset_connectivity is not None:
        # A violation is an entry that is switched on where connectivity forbids it.
        violations = np.logical_and(np.logical_not(self.asset_connectivity), self.asset_switching_table)
        if violations.any():
            raise ValueError(
                f"Not all current assignments are physically allowed Station_id: {self.grid_model_id}, Name: {self.name}"
            )

    return self

check_asset_bay #

check_asset_bay()

Check if the asset bay bus is in busbars.

RETURNS DESCRIPTION
Station

The station itself.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_asset_bay(self: "Station") -> "Station":
    """Check if the asset bay bus is in busbars.

    Returns
    -------
    Station
        The station itself.

    Raises
    ------
    ValueError
        If an asset bay references a busbar grid_model_id not present in busbars.
    """
    # Set lookup avoids rescanning the busbar list for every bay switch
    # (was O(assets * bay_switches * busbars)).
    busbar_grid_model_id = {busbar.grid_model_id for busbar in self.busbars}
    for asset in self.assets:
        if asset.asset_bay is None:
            continue
        # Iterating the dict yields its keys directly; .keys() is redundant.
        for busbar_id in asset.asset_bay.sr_switch_grid_model_id:
            if busbar_id not in busbar_grid_model_id:
                raise ValueError(
                    f"busbar_id {busbar_id} in asset {asset.grid_model_id} does not exist in busbars"
                    f" Station_id: {self.grid_model_id}, Name: {self.name}"
                )

    return self

check_bus_id #

check_bus_id()

Check if station grid_model_id is in the busbar.bus_branch_bus_id.

RETURNS DESCRIPTION
Station

The station itself.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
@model_validator(mode="after")
def check_bus_id(self: "Station") -> "Station":
    """Check if station grid_model_id is in the busbar.bus_branch_bus_id.

    Returns
    -------
    Station
        The station itself.
    """
    # Busbars without a bus-branch bus id carry no information for this check.
    known_bus_ids = [busbar.bus_branch_bus_id for busbar in self.busbars if busbar.bus_branch_bus_id is not None]
    if known_bus_ids and self.grid_model_id not in known_bus_ids:
        raise ValueError(
            f"Station grid_model_id {self.grid_model_id} does not exist in busbars bus_branch_bus_id"
            f" Station_id: {self.grid_model_id}, Name: {self.name}"
        )

    return self

__eq__ #

__eq__(other)

Check if two stations are equal.

PARAMETER DESCRIPTION
other

The other station to compare to.

TYPE: object

RETURNS DESCRIPTION
bool

True if the stations are equal, False otherwise.

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology.py
def __eq__(self, other: object) -> bool:
    """Check if two stations are equal.

    Parameters
    ----------
    other : object
        The other station to compare to.

    Returns
    -------
    bool
        True if the stations are equal, False otherwise.
    """
    if not isinstance(other, Station):
        return False
    # Compare the optional arrays explicitly: with exactly one side None the
    # previous `a == b` fallback performed an elementwise ndarray-vs-None
    # comparison, so __eq__ could yield a non-bool (ambiguous) result.
    if (self.asset_connectivity is None) != (other.asset_connectivity is None):
        return False
    connectivity_equal = self.asset_connectivity is None or np.array_equal(
        self.asset_connectivity, other.asset_connectivity
    )
    return bool(
        self.grid_model_id == other.grid_model_id
        and self.region == other.region
        and self.busbars == other.busbars
        and self.couplers == other.couplers
        and self.assets == other.assets
        and np.array_equal(self.asset_switching_table, other.asset_switching_table)
        and connectivity_equal
    )

TopologyWithLF #

Bases: Topology

A topology with additional fields for storing loadflow results.

stations instance-attribute #

stations

The stations, overloaded from topology replacing the Station class with StationWithLF

topology_id instance-attribute #

topology_id

The unique identifier of the topology.

grid_model_file class-attribute instance-attribute #

grid_model_file = None

The grid model file that represents this timestep. Note that relevant folders might only work on the machine they have been created, so some sort of permanent storage server should be used to keep these files globally accessible

name class-attribute instance-attribute #

name = None

The name of the topology.

asset_setpoints class-attribute instance-attribute #

asset_setpoints = None

The list of asset setpoints in the topology.

timestamp instance-attribute #

timestamp

The timestamp which is represented by this topology during the original optimization. I.e. if this timestep was the 5 o'clock timestep on the day that was optimized, then this timestamp would read 5 o'clock.

metrics class-attribute instance-attribute #

metrics = None

The metrics of the topology.

map_loadflow_results_station #

map_loadflow_results_station(
    station, node_extractor, asset_extractor
)

Map loadflow results onto a station without loadflows.

This also converts nan/inf values to None to be compatible with json serialization

PARAMETER DESCRIPTION
station

The station to map loadflow results onto, using the plain asset_topology classes

TYPE: Station

node_extractor

A function that extracts voltage angle and voltage magnitude for a busbar from some loadflow results table. If any of the values can not be extracted, the extractor is free to return None

TYPE: Callable[[Busbar], tuple[Optional[float], Optional[float]]]

asset_extractor

A function that extracts active power, reactive power, current and maximum current for an asset from some loadflow results table. If any of the values can not be extracted, the extractor is free to return None

TYPE: Callable[[SwitchableAsset], tuple[Optional[float], Optional[float], Optional[float], Optional[float]]]

RETURNS DESCRIPTION
StationWithLF

The station with loadflow results mapped onto it

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_loadflow.py
def map_loadflow_results_station(
    station: Station,
    node_extractor: Callable[[Busbar], tuple[Optional[float], Optional[float]]],
    asset_extractor: Callable[
        [SwitchableAsset],
        tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
    ],
) -> StationWithLF:
    """Map loadflow results onto a station without loadflows.

    This also converts nan/inf values to None to be compatible with json serialization

    Parameters
    ----------
    station : Station
        The station to map loadflow results onto, using the plain asset_topology classes
    node_extractor : Callable[[Busbar], tuple[Optional[float], Optional[float]]]
        A function that extracts voltage angle and voltage magnitude for a busbar from some loadflow
        results table. If any of the values can not be extracted, the extractor is free to return None
    asset_extractor : Callable[[SwitchableAsset], tuple[Optional[float], Optional[float], Optional[float], Optional[float]]]
        A function that extracts active power, reactive power, current and maximum current for
        an asset from some loadflow results table. If any of the values can not be extracted, the
        extractor is free to return None

    Returns
    -------
    StationWithLF
        The station with loadflow results mapped onto it
    """

    def _enrich_busbar(busbar: Busbar) -> BusbarWithLF:
        # Out-of-service busbars carry no loadflow result.
        va, vm = node_extractor(busbar) if busbar.in_service else (None, None)
        return BusbarWithLF(**busbar.model_dump(), vm=vm, va=va)

    def _enrich_asset(asset: SwitchableAsset) -> SwitchableAssetWithLF:
        # Out-of-service assets carry no loadflow result.
        p, q, i, i_max = asset_extractor(asset) if asset.in_service else (None, None, None, None)
        return SwitchableAssetWithLF(**asset.model_dump(), p=p, q=q, i=i, i_max=i_max)

    return StationWithLF(
        **station.model_dump(exclude=["busbars", "assets"]),
        busbars=[_enrich_busbar(busbar) for busbar in station.busbars],
        assets=[_enrich_asset(asset) for asset in station.assets],
    )

map_loadflow_results_topology #

map_loadflow_results_topology(
    topology, node_extractor, asset_extractor
)

Map loadflow results onto a topology without loadflows

This also converts nan/inf values to None to be compatible with json serialization

PARAMETER DESCRIPTION
topology

The topology to map loadflow results onto, using the plain asset_topology classes

TYPE: Topology

node_extractor

A function that extracts voltage angle and voltage magnitude for a busbar from some loadflow results table. If any of the values can not be extracted, the extractor is free to return None

TYPE: Callable[[Busbar], tuple[Optional[float], Optional[float]]]

asset_extractor

A function that extracts active power, reactive power, current and maximum current for an asset from some loadflow results table. If any of the values can not be extracted, the extractor is free to return None

TYPE: Callable[[SwitchableAsset], tuple[Optional[float], Optional[float], Optional[float], Optional[float]]]

RETURNS DESCRIPTION
TopologyWithLF

The topology with loadflow results mapped onto it

Source code in packages/interfaces_pkg/src/toop_engine_interfaces/asset_topology_loadflow.py
def map_loadflow_results_topology(
    topology: Topology,
    node_extractor: Callable[[Busbar], tuple[Optional[float], Optional[float]]],
    asset_extractor: Callable[
        [SwitchableAsset],
        tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
    ],
) -> TopologyWithLF:
    """Map loadflow results onto a topology without loadflows.

    This also converts nan/inf values to None to be compatible with json serialization

    Parameters
    ----------
    topology : Topology
        The topology to map loadflow results onto, using the plain asset_topology classes
    node_extractor : Callable[[Busbar], tuple[Optional[float], Optional[float]]]
        A function that extracts voltage angle and voltage magnitude for a busbar from some loadflow
        results table. If any of the values can not be extracted, the extractor is free to return None
    asset_extractor : Callable[[SwitchableAsset], tuple[Optional[float], Optional[float], Optional[float], Optional[float]]]
        A function that extracts active power, reactive power, current and maximum current for
        an asset from some loadflow results table. If any of the values can not be extracted, the
        extractor is free to return None

    Returns
    -------
    TopologyWithLF
        The topology with loadflow results mapped onto it
    """
    # Comprehension replaces the manual append loop: same order, same calls.
    stations = [
        map_loadflow_results_station(station, node_extractor, asset_extractor) for station in topology.stations
    ]
    return TopologyWithLF(**topology.model_dump(exclude=["stations"]), stations=stations)